source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
__clang_hip_math.h | /*===---- __clang_hip_math.h - Device-side HIP math support ----------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __CLANG_HIP_MATH_H__
#define __CLANG_HIP_MATH_H__
#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif
#if !defined(__HIPCC_RTC__)
#if defined(__cplusplus)
#include <algorithm>
#endif
#include <limits.h>
#include <stdint.h>
#ifdef __OPENMP_AMDGCN__
#include <omp.h>
#endif
#endif // !defined(__HIPCC_RTC__)
#pragma push_macro("__DEVICE__")
#ifdef __OPENMP_AMDGCN__
#define __DEVICE__ static inline __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
#endif
// A few functions return bool type starting only in C++11.
#pragma push_macro("__RETURN_TYPE")
#ifdef __OPENMP_AMDGCN__
#define __RETURN_TYPE int
#else
#if defined(__cplusplus)
#define __RETURN_TYPE bool
#else
#define __RETURN_TYPE int
#endif
#endif // __OPENMP_AMDGCN__
#if defined (__cplusplus) && __cplusplus < 201103L
// emulate static_assert on type sizes
// Pre-C++11 fallback: only __compare_result<true> has a `valid` member, so
// referencing it with a false comparison fails to compile.
template<bool>
struct __compare_result{};
template<>
struct __compare_result<true> {
  static const __device__ bool valid;
};
// Consumes the `valid` reference so it is not diagnosed as unused.
__DEVICE__
void __suppress_unused_warning(bool b){};
template <unsigned int S, unsigned int T>
__DEVICE__ void __static_assert_equal_size() {
  __suppress_unused_warning(__compare_result<S == T>::valid);
}
#define __static_assert_type_size_equal(A, B) \
  __static_assert_equal_size<A,B>()
#else
// Everywhere else: a genuine static_assert.
#define __static_assert_type_size_equal(A,B) \
  static_assert((A) == (B), "")
#endif
__DEVICE__
// Parse __tagp as an octal digit string and return its value; returns 0 if
// any character is not an octal digit. Used to build NaN payloads for nan*().
uint64_t __make_mantissa_base8(const char *__tagp) {
  uint64_t __r = 0;
  if (!__tagp)
    return 0;
  // Iterate until the terminating NUL. The previous condition tested the
  // pointer itself (`while (__tagp)`), so the loop always walked into the
  // terminator, failed the digit test, and returned 0 for every input.
  while (*__tagp != '\0') {
    char __tmp = *__tagp;
    if (__tmp >= '0' && __tmp <= '7')
      __r = (__r * 8u) + __tmp - '0';
    else
      return 0;
    ++__tagp;
  }
  return __r;
}
__DEVICE__
// Parse __tagp as a decimal digit string and return its value; returns 0 if
// any character is not a decimal digit.
uint64_t __make_mantissa_base10(const char *__tagp) {
  uint64_t __r = 0;
  if (!__tagp)
    return 0;
  // Stop at the NUL terminator; the old `while (__tagp)` tested the pointer,
  // not the character, so valid strings always ended up returning 0.
  while (*__tagp != '\0') {
    char __tmp = *__tagp;
    if (__tmp >= '0' && __tmp <= '9')
      __r = (__r * 10u) + __tmp - '0';
    else
      return 0;
    ++__tagp;
  }
  return __r;
}
__DEVICE__
// Parse __tagp as a hexadecimal digit string (no "0x" prefix) and return its
// value; returns 0 if any character is not a hex digit.
uint64_t __make_mantissa_base16(const char *__tagp) {
  uint64_t __r = 0;
  if (!__tagp)
    return 0;
  // Stop at the NUL terminator; the old `while (__tagp)` tested the pointer,
  // not the character, so valid strings always ended up returning 0.
  while (*__tagp != '\0') {
    char __tmp = *__tagp;
    if (__tmp >= '0' && __tmp <= '9')
      __r = (__r * 16u) + __tmp - '0';
    else if (__tmp >= 'a' && __tmp <= 'f')
      __r = (__r * 16u) + __tmp - 'a' + 10;
    else if (__tmp >= 'A' && __tmp <= 'F')
      __r = (__r * 16u) + __tmp - 'A' + 10;
    else
      return 0;
    ++__tagp;
  }
  return __r;
}
__DEVICE__
// Dispatch on the C nan() n-char-sequence prefix: "0x"/"0X" selects base 16,
// a bare leading '0' selects base 8, anything else base 10.
uint64_t __make_mantissa(const char *__tagp) {
  if (!__tagp)
    return 0u;
  if (*__tagp == '0') {
    ++__tagp;
    if (*__tagp == 'x' || *__tagp == 'X')
      // Skip the 'x'/'X' itself: the base-16 parser accepts only hex digits,
      // so handing it the prefix character would make every "0x..." tag 0.
      return __make_mantissa_base16(__tagp + 1);
    else
      return __make_mantissa_base8(__tagp);
  }
  return __make_mantissa_base10(__tagp);
}
// BEGIN FLOAT
#if defined(__cplusplus)
__DEVICE__
// Branch-free |x|: __m is 0 for non-negative x, all-ones for negative x, so
// (x ^ __m) - __m conditionally negates x.
int abs(int __x) {
  int __m = __x >> (sizeof(int) * CHAR_BIT - 1);
  return (__x ^ __m) - __m;
}
__DEVICE__
// Branch-free |x| for long; same conditional-negate trick as abs().
long labs(long __x) {
  long __m = __x >> (sizeof(long) * CHAR_BIT - 1);
  return (__x ^ __m) - __m;
}
__DEVICE__
// Branch-free |x| for long long; same conditional-negate trick as abs().
long long llabs(long long __x) {
  long long __m = __x >> (sizeof(long long) * CHAR_BIT - 1);
  return (__x ^ __m) - __m;
}
#endif
// One-to-one wrappers forwarding each libm float entry point to the
// corresponding __ocml_*_f32 routine of the ROCm device math library (OCML).
__DEVICE__
float acosf(float __x) { return __ocml_acos_f32(__x); }
__DEVICE__
float acoshf(float __x) { return __ocml_acosh_f32(__x); }
__DEVICE__
float asinf(float __x) { return __ocml_asin_f32(__x); }
__DEVICE__
float asinhf(float __x) { return __ocml_asinh_f32(__x); }
__DEVICE__
float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
__DEVICE__
float atanf(float __x) { return __ocml_atan_f32(__x); }
__DEVICE__
float atanhf(float __x) { return __ocml_atanh_f32(__x); }
__DEVICE__
float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
__DEVICE__
float ceilf(float __x) { return __ocml_ceil_f32(__x); }
__DEVICE__
float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
__DEVICE__
float cosf(float __x) { return __ocml_cos_f32(__x); }
__DEVICE__
float coshf(float __x) { return __ocml_cosh_f32(__x); }
__DEVICE__
float cospif(float __x) { return __ocml_cospi_f32(__x); }
__DEVICE__
float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
__DEVICE__
float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
__DEVICE__
float erfcf(float __x) { return __ocml_erfc_f32(__x); }
__DEVICE__
float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
__DEVICE__
float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
__DEVICE__
float erff(float __x) { return __ocml_erf_f32(__x); }
__DEVICE__
float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
__DEVICE__
float exp10f(float __x) { return __ocml_exp10_f32(__x); }
__DEVICE__
float exp2f(float __x) { return __ocml_exp2_f32(__x); }
__DEVICE__
float expf(float __x) { return __ocml_exp_f32(__x); }
__DEVICE__
float expm1f(float __x) { return __ocml_expm1_f32(__x); }
__DEVICE__
float fabsf(float __x) { return __ocml_fabs_f32(__x); }
__DEVICE__
float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
// Plain IEEE division; no OCML call involved.
__DEVICE__
float fdividef(float __x, float __y) { return __x / __y; }
__DEVICE__
float floorf(float __x) { return __ocml_floor_f32(__x); }
__DEVICE__
float fmaf(float __x, float __y, float __z) {
  return __ocml_fma_f32(__x, __y, __z);
}
__DEVICE__
float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
__DEVICE__
float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
__DEVICE__
float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
// Splits __x into a mantissa (returned) and a binary exponent (*__nptr).
__DEVICE__
float frexpf(float __x, int *__nptr) {
  // OCML takes the out-parameter in address space 5 (AMDGPU private), so go
  // through a local temporary rather than the caller's generic pointer.
  int __tmp;
#ifdef __OPENMP_AMDGCN__
  // Make the temporary thread-local under the OpenMP device runtime.
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  float __r =
      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
  *__nptr = __tmp;
  return __r;
}
__DEVICE__
float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
__DEVICE__
int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
// Classification helpers. __RETURN_TYPE is bool for C++ and int otherwise
// (see the macro definition at the top of this header).
__DEVICE__
__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
__DEVICE__
__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
__DEVICE__
__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
__DEVICE__
float j0f(float __x) { return __ocml_j0_f32(__x); }
__DEVICE__
float j1f(float __x) { return __ocml_j1_f32(__x); }
// Bessel function of the first kind of order __n, computed by the forward
// recurrence J_{i+1}(x) = (2i/x) J_i(x) - J_{i-1}(x), seeded with J0 and J1.
// NOTE(review): forward recurrence is numerically unstable for large __n;
// presumably acceptable for a placeholder implementation — confirm.
__DEVICE__
float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication
  // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case.
  if (__n == 0)
    return j0f(__x);
  if (__n == 1)
    return j1f(__x);
  float __x0 = j0f(__x);
  float __x1 = j1f(__x);
  for (int __i = 1; __i < __n; ++__i) {
    float __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}
__DEVICE__
float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
__DEVICE__
float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
// OCML returns the rounded value as float; the integral conversion happens
// implicitly on return.
__DEVICE__
long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
__DEVICE__
long long int llroundf(float __x) { return __ocml_round_f32(__x); }
__DEVICE__
float log10f(float __x) { return __ocml_log10_f32(__x); }
__DEVICE__
float log1pf(float __x) { return __ocml_log1p_f32(__x); }
__DEVICE__
float log2f(float __x) { return __ocml_log2_f32(__x); }
__DEVICE__
float logbf(float __x) { return __ocml_logb_f32(__x); }
__DEVICE__
float logf(float __x) { return __ocml_log_f32(__x); }
__DEVICE__
long int lrintf(float __x) { return __ocml_rint_f32(__x); }
__DEVICE__
long int lroundf(float __x) { return __ocml_round_f32(__x); }
// Splits __x into fractional (returned) and integral (*__iptr) parts.
__DEVICE__
float modff(float __x, float *__iptr) {
  // OCML takes the out-parameter in address space 5 (AMDGPU private), hence
  // the local temporary.
  float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  float __r =
      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
  *__iptr = __tmp;
  return __r;
}
// Quiet NaN whose payload is parsed from __tagp (see __make_mantissa).
__DEVICE__
float nanf(const char *__tagp) {
  union {
    float val;
    // IEEE-754 binary32, LSB first: 22 payload bits plus the quiet bit form
    // the 23-bit significand field.
    struct ieee_float {
      unsigned int mantissa : 22;
      unsigned int quiet : 1;
      unsigned int exponent : 8;
      unsigned int sign : 1;
    } bits;
  } __tmp;
  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
  __tmp.bits.sign = 0u;
  __tmp.bits.exponent = ~0u; // all-ones exponent marks Inf/NaN
  __tmp.bits.quiet = 1u;
  __tmp.bits.mantissa = __make_mantissa(__tagp);
  return __tmp.val;
}
__DEVICE__
float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
__DEVICE__
float nextafterf(float __x, float __y) {
  return __ocml_nextafter_f32(__x, __y);
}
// Euclidean lengths of 3- and 4-vectors ("len" in OCML naming).
__DEVICE__
float norm3df(float __x, float __y, float __z) {
  return __ocml_len3_f32(__x, __y, __z);
}
__DEVICE__
float norm4df(float __x, float __y, float __z, float __w) {
  return __ocml_len4_f32(__x, __y, __z, __w);
}
// Standard normal CDF and its inverse.
__DEVICE__
float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
__DEVICE__
float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
__DEVICE__
// Euclidean norm of a __dim-element vector: sqrt of the sum of squares.
float normf(int __dim,
            const float *__a) { // TODO: placeholder until OCML adds support.
  float __sum_sq = 0;
  while (__dim--) {
    __sum_sq += (*__a) * (*__a);
    ++__a;
  }
  return __ocml_sqrt_f32(__sum_sq);
}
__DEVICE__
float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
// Integer-exponent power ("pown" in OCML naming).
__DEVICE__
float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); }
// Reciprocal cube root.
__DEVICE__
float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
__DEVICE__
float remainderf(float __x, float __y) {
  return __ocml_remainder_f32(__x, __y);
}
// remainder() plus the low-order quotient bits in *__quo.
__DEVICE__
float remquof(float __x, float __y, int *__quo) {
  // OCML takes the out-parameter in address space 5 (AMDGPU private), hence
  // the local temporary.
  int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  float __r = __ocml_remquo_f32(
      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
  *__quo = __tmp;
  return __r;
}
// Reciprocal hypot / rint / reciprocal 3- and 4-vector lengths.
__DEVICE__
float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
__DEVICE__
float rintf(float __x) { return __ocml_rint_f32(__x); }
__DEVICE__
float rnorm3df(float __x, float __y, float __z) {
  return __ocml_rlen3_f32(__x, __y, __z);
}
__DEVICE__
float rnorm4df(float __x, float __y, float __z, float __w) {
  return __ocml_rlen4_f32(__x, __y, __z, __w);
}
__DEVICE__
// Reciprocal Euclidean norm: rsqrt of the sum of squares.
float rnormf(int __dim,
             const float *__a) { // TODO: placeholder until OCML adds support.
  float __sum_sq = 0;
  while (__dim--) {
    __sum_sq += (*__a) * (*__a);
    ++__a;
  }
  return __ocml_rsqrt_f32(__sum_sq);
}
__DEVICE__
float roundf(float __x) { return __ocml_round_f32(__x); }
__DEVICE__
float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
// scalbn takes an int exponent, so fall back to scalb (float exponent) when
// the long value does not fit.
// NOTE(review): values below INT_MIN still take the scalbn path and get
// narrowed to int — confirm intended.
__DEVICE__
float scalblnf(float __x, long int __n) {
  return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
                         : __ocml_scalb_f32(__x, __n);
}
__DEVICE__
float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
__DEVICE__
__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
// Computes sin and cos in one OCML call; sin is the return value, cos comes
// back through the address-space-5 out-parameter.
__DEVICE__
void sincosf(float __x, float *__sinptr, float *__cosptr) {
  float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr =
      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
  *__cosptr = __tmp;
}
// Same, but for sin(pi*x) / cos(pi*x).
__DEVICE__
void sincospif(float __x, float *__sinptr, float *__cosptr) {
  float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr = __ocml_sincospi_f32(
      __x, (__attribute__((address_space(5))) float *)&__tmp);
  *__cosptr = __tmp;
}
__DEVICE__
float sinf(float __x) { return __ocml_sin_f32(__x); }
__DEVICE__
float sinhf(float __x) { return __ocml_sinh_f32(__x); }
__DEVICE__
float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
__DEVICE__
float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
__DEVICE__
float tanf(float __x) { return __ocml_tan_f32(__x); }
__DEVICE__
float tanhf(float __x) { return __ocml_tanh_f32(__x); }
__DEVICE__
float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
__DEVICE__
float truncf(float __x) { return __ocml_trunc_f32(__x); }
__DEVICE__
float y0f(float __x) { return __ocml_y0_f32(__x); }
__DEVICE__
float y1f(float __x) { return __ocml_y1_f32(__x); }
// Bessel function of the second kind of order __n, via the same forward
// recurrence as jnf(), seeded with Y0 and Y1.
__DEVICE__
float ynf(int __n, float __x) { // TODO: we could use Ahmes multiplication
  // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return y0f(__x);
  if (__n == 1)
    return y1f(__x);
  float __x0 = y0f(__x);
  float __x1 = y1f(__x);
  for (int __i = 1; __i < __n; ++__i) {
    float __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}
// BEGIN INTRINSICS
// Fast, reduced-accuracy device intrinsics: __cosf/__expf/etc. map to OCML's
// "native_" hardware-approximation routines.
__DEVICE__
float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
__DEVICE__
float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
__DEVICE__
float __expf(float __x) { return __ocml_native_exp_f32(__x); }
// Explicitly-rounded arithmetic. Suffix mapping throughout this section:
// _rd -> OCML rtn (toward -inf), _rn -> rte (to nearest even),
// _ru -> rtp (toward +inf), _rz -> rtz (toward zero). The directed-rounding
// variants exist only when OCML_BASIC_ROUNDED_OPERATIONS is defined;
// otherwise only the round-to-nearest form is provided, as a plain operation.
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
__DEVICE__
float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
__DEVICE__
float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
__DEVICE__
float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fadd_rn(float __x, float __y) { return __x + __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
__DEVICE__
float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
__DEVICE__
float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
__DEVICE__
float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fdiv_rn(float __x, float __y) { return __x / __y; }
#endif
__DEVICE__
float __fdividef(float __x, float __y) { return __x / __y; }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fmaf_rd(float __x, float __y, float __z) {
  return __ocml_fma_rtn_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
  return __ocml_fma_rte_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_ru(float __x, float __y, float __z) {
  return __ocml_fma_rtp_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_rz(float __x, float __y, float __z) {
  return __ocml_fma_rtz_f32(__x, __y, __z);
}
#else
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
  return __ocml_fma_f32(__x, __y, __z);
}
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
__DEVICE__
float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
__DEVICE__
float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
__DEVICE__
float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fmul_rn(float __x, float __y) { return __x * __y; }
#endif
// Reciprocal, implemented as an explicitly-rounded division of 1.0f.
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); }
__DEVICE__
float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); }
__DEVICE__
float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); }
__DEVICE__
float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); }
#else
__DEVICE__
float __frcp_rn(float __x) { return 1.0f / __x; }
#endif
// Fast reciprocal square root via the amdgcn rsq builtin.
__DEVICE__
float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
__DEVICE__
float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
__DEVICE__
float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
__DEVICE__
float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
#else
__DEVICE__
float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
__DEVICE__
float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
__DEVICE__
float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
__DEVICE__
float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fsub_rn(float __x, float __y) { return __x - __y; }
#endif
__DEVICE__
float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
__DEVICE__
float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
__DEVICE__
float __logf(float __x) { return __ocml_native_log_f32(__x); }
// Note: __powf and __tanf forward to the full-precision OCML routines, not
// "native" approximations.
__DEVICE__
float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
// Clamp to [0, 1].
__DEVICE__
float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }
__DEVICE__
void __sincosf(float __x, float *__sinptr, float *__cosptr) {
  *__sinptr = __ocml_native_sin_f32(__x);
  *__cosptr = __ocml_native_cos_f32(__x);
}
__DEVICE__
float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
__DEVICE__
float __tanf(float __x) { return __ocml_tan_f32(__x); }
// END INTRINSICS
// END FLOAT
// BEGIN DOUBLE
// One-to-one wrappers forwarding each libm double entry point to the
// corresponding OCML __ocml_*_f64 routine; mirrors the float section above.
__DEVICE__
double acos(double __x) { return __ocml_acos_f64(__x); }
__DEVICE__
double acosh(double __x) { return __ocml_acosh_f64(__x); }
__DEVICE__
double asin(double __x) { return __ocml_asin_f64(__x); }
__DEVICE__
double asinh(double __x) { return __ocml_asinh_f64(__x); }
__DEVICE__
double atan(double __x) { return __ocml_atan_f64(__x); }
__DEVICE__
double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); }
__DEVICE__
double atanh(double __x) { return __ocml_atanh_f64(__x); }
__DEVICE__
double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
__DEVICE__
double ceil(double __x) { return __ocml_ceil_f64(__x); }
__DEVICE__
double copysign(double __x, double __y) {
  return __ocml_copysign_f64(__x, __y);
}
__DEVICE__
double cos(double __x) { return __ocml_cos_f64(__x); }
__DEVICE__
double cosh(double __x) { return __ocml_cosh_f64(__x); }
__DEVICE__
double cospi(double __x) { return __ocml_cospi_f64(__x); }
__DEVICE__
double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
__DEVICE__
double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
__DEVICE__
double erf(double __x) { return __ocml_erf_f64(__x); }
__DEVICE__
double erfc(double __x) { return __ocml_erfc_f64(__x); }
__DEVICE__
double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
__DEVICE__
double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
__DEVICE__
double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
__DEVICE__
double exp(double __x) { return __ocml_exp_f64(__x); }
__DEVICE__
double exp10(double __x) { return __ocml_exp10_f64(__x); }
__DEVICE__
double exp2(double __x) { return __ocml_exp2_f64(__x); }
__DEVICE__
double expm1(double __x) { return __ocml_expm1_f64(__x); }
__DEVICE__
double fabs(double __x) { return __ocml_fabs_f64(__x); }
__DEVICE__
double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
__DEVICE__
double floor(double __x) { return __ocml_floor_f64(__x); }
__DEVICE__
double fma(double __x, double __y, double __z) {
  return __ocml_fma_f64(__x, __y, __z);
}
__DEVICE__
double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
__DEVICE__
double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
__DEVICE__
double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
// Splits __x into a mantissa (returned) and a binary exponent (*__nptr);
// the temporary exists because OCML wants an address-space-5 pointer.
__DEVICE__
double frexp(double __x, int *__nptr) {
  int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  double __r =
      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
  *__nptr = __tmp;
  return __r;
}
__DEVICE__
double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); }
__DEVICE__
int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
// Classification helpers; __RETURN_TYPE is bool in C++ and int otherwise.
__DEVICE__
__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
__DEVICE__
__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
__DEVICE__
__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
__DEVICE__
double j0(double __x) { return __ocml_j0_f64(__x); }
__DEVICE__
double j1(double __x) { return __ocml_j1_f64(__x); }
// Bessel function of the first kind of order __n, via the forward recurrence
// J_{i+1}(x) = (2i/x) J_i(x) - J_{i-1}(x), seeded with J0 and J1.
// NOTE(review): forward recurrence is numerically unstable for large __n —
// acceptable for a placeholder, confirm.
__DEVICE__
double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
  // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return j0(__x);
  if (__n == 1)
    return j1(__x);
  double __x0 = j0(__x);
  double __x1 = j1(__x);
  for (int __i = 1; __i < __n; ++__i) {
    double __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}
__DEVICE__
double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
__DEVICE__
double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
// OCML returns the rounded value as double; the integral conversion happens
// implicitly on return.
__DEVICE__
long long int llrint(double __x) { return __ocml_rint_f64(__x); }
__DEVICE__
long long int llround(double __x) { return __ocml_round_f64(__x); }
__DEVICE__
double log(double __x) { return __ocml_log_f64(__x); }
__DEVICE__
double log10(double __x) { return __ocml_log10_f64(__x); }
__DEVICE__
double log1p(double __x) { return __ocml_log1p_f64(__x); }
__DEVICE__
double log2(double __x) { return __ocml_log2_f64(__x); }
__DEVICE__
double logb(double __x) { return __ocml_logb_f64(__x); }
__DEVICE__
long int lrint(double __x) { return __ocml_rint_f64(__x); }
__DEVICE__
long int lround(double __x) { return __ocml_round_f64(__x); }
// Splits __x into fractional (returned) and integral (*__iptr) parts; the
// temporary exists because OCML wants an address-space-5 pointer.
__DEVICE__
double modf(double __x, double *__iptr) {
  double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  double __r =
      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
  *__iptr = __tmp;
  return __r;
}
__DEVICE__
// Quiet NaN whose payload is parsed from __tagp (see __make_mantissa).
double nan(const char *__tagp) {
#if !_WIN32
  union {
    double val;
    // IEEE-754 binary64, LSB first: 51 payload bits plus the quiet bit form
    // the 52-bit significand field.
    struct ieee_double {
      uint64_t mantissa : 51;
      uint32_t quiet : 1;
      uint32_t exponent : 11;
      uint32_t sign : 1;
    } bits;
  } __tmp;
  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
  __tmp.bits.sign = 0u;
  __tmp.bits.exponent = ~0u; // all-ones exponent marks Inf/NaN
  __tmp.bits.quiet = 1u;
  __tmp.bits.mantissa = __make_mantissa(__tagp);
  return __tmp.val;
#else
  __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double));
  uint64_t __val = __make_mantissa(__tagp);
  // Set the exponent field and quiet bit. The shift must be performed in
  // 64-bit arithmetic: the previous `0xFFF << 51` shifted a 32-bit int by 51
  // bits, which is undefined behavior.
  __val |= 0xFFFull << 51;
  return *reinterpret_cast<double *>(&__val);
#endif
}
__DEVICE__
double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
__DEVICE__
double nextafter(double __x, double __y) {
  return __ocml_nextafter_f64(__x, __y);
}
__DEVICE__
// Euclidean norm of a __dim-element vector: sqrt of the sum of squares.
double norm(int __dim,
            const double *__a) { // TODO: placeholder until OCML adds support.
  double __sum_sq = 0;
  while (__dim--) {
    __sum_sq += (*__a) * (*__a);
    ++__a;
  }
  return __ocml_sqrt_f64(__sum_sq);
}
// Euclidean lengths of 3- and 4-vectors ("len" in OCML naming).
__DEVICE__
double norm3d(double __x, double __y, double __z) {
  return __ocml_len3_f64(__x, __y, __z);
}
__DEVICE__
double norm4d(double __x, double __y, double __z, double __w) {
  return __ocml_len4_f64(__x, __y, __z, __w);
}
// Standard normal CDF and its inverse.
__DEVICE__
double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
__DEVICE__
double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
__DEVICE__
double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
// Integer-exponent power ("pown" in OCML naming).
__DEVICE__
double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); }
// Reciprocal cube root.
__DEVICE__
double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
__DEVICE__
double remainder(double __x, double __y) {
  return __ocml_remainder_f64(__x, __y);
}
// remainder() plus the low-order quotient bits in *__quo; the temporary
// exists because OCML wants an address-space-5 pointer.
__DEVICE__
double remquo(double __x, double __y, int *__quo) {
  int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  double __r = __ocml_remquo_f64(
      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
  *__quo = __tmp;
  return __r;
}
__DEVICE__
double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
__DEVICE__
double rint(double __x) { return __ocml_rint_f64(__x); }
__DEVICE__
// Reciprocal Euclidean norm: rsqrt of the sum of squares.
double rnorm(int __dim,
             const double *__a) { // TODO: placeholder until OCML adds support.
  double __sum_sq = 0;
  while (__dim--) {
    __sum_sq += (*__a) * (*__a);
    ++__a;
  }
  return __ocml_rsqrt_f64(__sum_sq);
}
// Reciprocal 3- and 4-vector lengths.
__DEVICE__
double rnorm3d(double __x, double __y, double __z) {
  return __ocml_rlen3_f64(__x, __y, __z);
}
__DEVICE__
double rnorm4d(double __x, double __y, double __z, double __w) {
  return __ocml_rlen4_f64(__x, __y, __z, __w);
}
__DEVICE__
double round(double __x) { return __ocml_round_f64(__x); }
__DEVICE__
double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
// scalbn takes an int exponent, so fall back to scalb (double exponent) when
// the long value does not fit.
// NOTE(review): values below INT_MIN still take the scalbn path and get
// narrowed to int — confirm intended.
__DEVICE__
double scalbln(double __x, long int __n) {
  return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
                         : __ocml_scalb_f64(__x, __n);
}
__DEVICE__
double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
__DEVICE__
__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
__DEVICE__
double sin(double __x) { return __ocml_sin_f64(__x); }
// Computes sin and cos in one OCML call; sin is the return value, cos comes
// back through the address-space-5 out-parameter.
__DEVICE__
void sincos(double __x, double *__sinptr, double *__cosptr) {
  double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr = __ocml_sincos_f64(
      __x, (__attribute__((address_space(5))) double *)&__tmp);
  *__cosptr = __tmp;
}
// Same, but for sin(pi*x) / cos(pi*x).
__DEVICE__
void sincospi(double __x, double *__sinptr, double *__cosptr) {
  double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr = __ocml_sincospi_f64(
      __x, (__attribute__((address_space(5))) double *)&__tmp);
  *__cosptr = __tmp;
}
__DEVICE__
double sinh(double __x) { return __ocml_sinh_f64(__x); }
__DEVICE__
double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
__DEVICE__
double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
__DEVICE__
double tan(double __x) { return __ocml_tan_f64(__x); }
__DEVICE__
double tanh(double __x) { return __ocml_tanh_f64(__x); }
__DEVICE__
double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
__DEVICE__
double trunc(double __x) { return __ocml_trunc_f64(__x); }
__DEVICE__
double y0(double __x) { return __ocml_y0_f64(__x); }
__DEVICE__
double y1(double __x) { return __ocml_y1_f64(__x); }
// Bessel function of the second kind of order __n, via the same forward
// recurrence as jn(), seeded with Y0 and Y1.
__DEVICE__
double yn(int __n, double __x) { // TODO: we could use Ahmes multiplication
  // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return y0(__x);
  if (__n == 1)
    return y1(__x);
  double __x0 = y0(__x);
  double __x1 = y1(__x);
  for (int __i = 1; __i < __n; ++__i) {
    double __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}
// BEGIN INTRINSICS
// Explicitly-rounded double arithmetic. Suffix mapping: _rd -> OCML rtn
// (toward -inf), _rn -> rte (to nearest even), _ru -> rtp (toward +inf),
// _rz -> rtz (toward zero). The directed-rounding variants exist only when
// OCML_BASIC_ROUNDED_OPERATIONS is defined; otherwise only the
// round-to-nearest form is provided, as a plain operation.
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dadd_rd(double __x, double __y) {
  return __ocml_add_rtn_f64(__x, __y);
}
__DEVICE__
double __dadd_rn(double __x, double __y) {
  return __ocml_add_rte_f64(__x, __y);
}
__DEVICE__
double __dadd_ru(double __x, double __y) {
  return __ocml_add_rtp_f64(__x, __y);
}
__DEVICE__
double __dadd_rz(double __x, double __y) {
  return __ocml_add_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dadd_rn(double __x, double __y) { return __x + __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __ddiv_rd(double __x, double __y) {
  return __ocml_div_rtn_f64(__x, __y);
}
__DEVICE__
double __ddiv_rn(double __x, double __y) {
  return __ocml_div_rte_f64(__x, __y);
}
__DEVICE__
double __ddiv_ru(double __x, double __y) {
  return __ocml_div_rtp_f64(__x, __y);
}
__DEVICE__
double __ddiv_rz(double __x, double __y) {
  return __ocml_div_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __ddiv_rn(double __x, double __y) { return __x / __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dmul_rd(double __x, double __y) {
  return __ocml_mul_rtn_f64(__x, __y);
}
__DEVICE__
double __dmul_rn(double __x, double __y) {
  return __ocml_mul_rte_f64(__x, __y);
}
__DEVICE__
double __dmul_ru(double __x, double __y) {
  return __ocml_mul_rtp_f64(__x, __y);
}
__DEVICE__
double __dmul_rz(double __x, double __y) {
  return __ocml_mul_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dmul_rn(double __x, double __y) { return __x * __y; }
#endif
// Reciprocal, implemented as an explicitly-rounded division of 1.0.
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __drcp_rd(double __x) { return __ocml_div_rtn_f64(1.0, __x); }
__DEVICE__
double __drcp_rn(double __x) { return __ocml_div_rte_f64(1.0, __x); }
__DEVICE__
double __drcp_ru(double __x) { return __ocml_div_rtp_f64(1.0, __x); }
__DEVICE__
double __drcp_rz(double __x) { return __ocml_div_rtz_f64(1.0, __x); }
#else
__DEVICE__
double __drcp_rn(double __x) { return 1.0 / __x; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
__DEVICE__
double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); }
__DEVICE__
double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
__DEVICE__
double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
#else
__DEVICE__
double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dsub_rd(double __x, double __y) {
  return __ocml_sub_rtn_f64(__x, __y);
}
__DEVICE__
double __dsub_rn(double __x, double __y) {
  return __ocml_sub_rte_f64(__x, __y);
}
__DEVICE__
double __dsub_ru(double __x, double __y) {
  return __ocml_sub_rtp_f64(__x, __y);
}
__DEVICE__
double __dsub_rz(double __x, double __y) {
  return __ocml_sub_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dsub_rn(double __x, double __y) { return __x - __y; }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __fma_rd(double __x, double __y, double __z) {
  return __ocml_fma_rtn_f64(__x, __y, __z);
}
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
  return __ocml_fma_rte_f64(__x, __y, __z);
}
__DEVICE__
double __fma_ru(double __x, double __y, double __z) {
  return __ocml_fma_rtp_f64(__x, __y, __z);
}
__DEVICE__
double __fma_rz(double __x, double __y, double __z) {
  return __ocml_fma_rtz_f64(__x, __y, __z);
}
#else
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
  return __ocml_fma_f64(__x, __y, __z);
}
#endif
// END INTRINSICS
// END DOUBLE
// C only macros
// C11 type-generic classification macros; C++ translation units get real
// overloads/templates below instead.
#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x)
#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x)
#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x)
#define signbit(__x) \
  _Generic((__x), float : __signbitf, double : __signbit)(__x)
#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
#if defined(__cplusplus)
// Generic device-side min/max templates plus the concrete int/float/double
// overloads HIP guarantees; the floating overloads defer to fmin*/fmax* for
// their NaN handling.
template <class T> __DEVICE__ T min(T __arg1, T __arg2) {
  return (__arg1 < __arg2) ? __arg1 : __arg2;
}
template <class T> __DEVICE__ T max(T __arg1, T __arg2) {
  return (__arg1 > __arg2) ? __arg1 : __arg2;
}
__DEVICE__ int min(int __arg1, int __arg2) {
  return (__arg1 < __arg2) ? __arg1 : __arg2;
}
__DEVICE__ int max(int __arg1, int __arg2) {
  return (__arg1 > __arg2) ? __arg1 : __arg2;
}
__DEVICE__
float max(float __x, float __y) { return fmaxf(__x, __y); }
__DEVICE__
double max(double __x, double __y) { return fmax(__x, __y); }
__DEVICE__
float min(float __x, float __y) { return fminf(__x, __y); }
__DEVICE__
double min(double __x, double __y) { return fmin(__x, __y); }
// Host-side counterparts so mixed host/device code linking against this
// header still finds int min/max; they simply defer to the STL.
#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
__host__ inline static int min(int __arg1, int __arg2) {
  return std::min(__arg1, __arg2);
}
__host__ inline static int max(int __arg1, int __arg2) {
  return std::max(__arg1, __arg2);
}
#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
#endif
#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__RETURN_TYPE")
#endif // __CLANG_HIP_MATH_H__
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
Constant declaration.
*/
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure: MIFF magick, direct class, sRGB colorspace
    with its standard chromaticities, perceptual rendering intent.
  */
  (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
  if (image_info->size != (char *) NULL)
    {
      /*
        Seed canvas dimensions from the size geometry; its x offset is
        stored as image->offset and cleared from extract_info.
      */
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      /*
        The extract geometry is only honored when an x or y offset is given.
      */
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        A single density value applies to both axes.
      */
      flags=ParseGeometry(image_info->density,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->matte_color=image_info->matte_color;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.
  */
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          /*
            ">" qualifier: only lower an existing delay.
          */
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              "<" qualifier: only raise an existing delay.  Fixed: this
              branch previously assigned ticks_per_second from sigma
              instead of assigning the delay from rho.
            */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate an ImageInfo structure and initialize it to default values;
    allocation failure raises a fatal exception.
  */
  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;  /* no next image was acquired; nothing to initialize */
  /*
    Seed the new image's filename from its predecessor, preferring the
    image_info filename when one was supplied.
  */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MagickPathExtent);
  /*
    Replace the new image's blob with a reference to the predecessor's,
    then link it into the list with the next scene number.
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /*
      Track the deepest depth, whether all colorspaces agree, and whether
      any image carries an alpha channel.
    */
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /*
          Top-to-bottom: height accumulates, width is the maximum.
        */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /*
      Left-to-right: width accumulates, height is the maximum.
    */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace,exception);
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /*
      Justify the current image within its slot according to its gravity.
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        if (GetPixelWriteMask(next,p) == 0)
          {
            /*
              Write-protected source pixel: emit the background color.
            */
            SetPixelBackgoundColor(append_image,q);
            p+=GetPixelChannels(next);
            q+=GetPixelChannels(append_image);
            continue;
          }
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /*
      Advance the insertion point for the next image in the sequence.
    */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *catch_exception;

  ExceptionType
    severity;

  /*
    Acquire a scratch exception, report any queued exceptions through
    CatchException(), and return the recorded severity.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  catch_exception=AcquireExceptionInfo();
  CatchException(catch_exception);
  severity=catch_exception->severity;
  catch_exception=DestroyExceptionInfo(catch_exception);
  return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClipImage() is a convenience wrapper that clips the image using its
  first ("#1") clipping path, with later operations taking effect inside
  the path.
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
  return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Look up the clipping-path data stored under the
    "8BIM:1999,2998:<pathname>" image property.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property,exception);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the path data into an image via BlobToImage().
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,
    MagickPathExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,
    MagickPathExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask,exception);
      /* NOTE(review): clip_mask is not destroyed on this failure path --
         looks like a potential leak; confirm against upstream. */
      if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Negate the mask so later operations take effect inside the path.
  */
  if (inside != MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse,exception);
  (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  /*
    Install the rendered mask as the image's write mask.
  */
  (void) SetImageMask(image,WritePixelMask,clip_mask,exception);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  Image
    *clone_image;

  double
    scale;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
  if (clone_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  /*
    Copy the scalar image attributes field by field.
  */
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->number_channels=image->number_channels;
  clone_image->number_meta_channels=image->number_meta_channels;
  clone_image->metacontent_extent=image->metacontent_extent;
  clone_image->colorspace=image->colorspace;
  clone_image->read_mask=image->read_mask;
  clone_image->write_mask=image->write_mask;
  clone_image->alpha_trait=image->alpha_trait;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  if (image->colormap != (PixelInfo *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelInfo *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  /*
    Deep-copy the ancillary data: info, profiles, properties, artifacts.
  */
  clone_image->image_info=CloneImageInfo(image->image_info);
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  clone_image->channel_mask=image->channel_mask;
  clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MagickPathExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,
    MagickPathExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /*
        Detached clone: unlink it from the image list and give it a fresh
        blob instead of sharing the parent's I/O stream.
      */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AcquireSemaphoreInfo();
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy requested: share the pixel cache by reference.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    New dimensions requested: rescale the page geometry and tile offset
    proportionally; the pixel content itself is left undefined.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);  /* NULL source: return the defaults */
  /*
    Copy scalar settings.
  */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /*
    String members are deep-copied with CloneString().
  */
  (void) CloneString(&clone_info->size,image_info->size);
  (void) CloneString(&clone_info->extract,image_info->extract);
  (void) CloneString(&clone_info->scenes,image_info->scenes);
  (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor);
  (void) CloneString(&clone_info->server_name,image_info->server_name);
  (void) CloneString(&clone_info->font,image_info->font);
  (void) CloneString(&clone_info->texture,image_info->texture);
  (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->matte_color=image_info->matte_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference; the profile is deep-copied.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->custom_stream=image_info->custom_stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    The destination rectangle must lie entirely within the image bounds.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels that exist in the source and are marked for
        update in the destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CopyImage)
#endif
        proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Decrement the reference count under the image semaphore; only the thread
    that drops the count to zero proceeds to deallocate.  All other callers
    return NULL without touching the image further.
  */
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  /*
    Release optionally-allocated members; each is guarded because a freshly
    acquired image may never have populated them.
  */
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  /*
    The semaphore goes last so the structure stays lockable during teardown;
    the signature is inverted so stale pointers fail the signature asserts.
  */
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every optionally-allocated ImageInfo member, then the structure
    itself.  Always returns NULL so callers can write
    info=DestroyImageInfo(info).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  /*
    profile is stored as void*; cast back to StringInfo for destruction.
  */
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /*
    Invert the signature so use-after-free is caught by signature asserts.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Validate the handle, emit a trace event when debugging is enabled, and
    detach the image's blob/stream association.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  /*
    Zero the whole structure, then set the members whose defaults differ
    from zero.
  */
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    MAGICK_SYNCHRONIZE environment variable overrides the synchronize
    default; the returned string is owned by us and must be destroyed.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the built-in default colors; a throw-away exception is used so
    color-resolution warnings do not escape to the caller.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  /*
    Signature is set last, marking the structure as fully initialized.
  */
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /*
    Accessor: hand back the FILE handle stored in the image info, which may
    be NULL when no file has been associated.
  */
  FILE
    *file;

  file=image_info->file;
  return(file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    The mask is materialized as a grayscale clone of the source image with
    alpha disabled and its own read mask cleared (so reading back the mask
    is not itself masked).
  */
  mask_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mask_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  mask_image->read_mask=MagickFalse;
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Copy the requested per-pixel mask channel into the gray channel of
        the mask image; anything other than WritePixelMask falls through to
        the read mask.
      */
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  /*
    On any pixel-cache failure the partially built mask is destroyed and
    NULL is returned.
  */
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  /*
    Snapshot the reference count under the image semaphore so concurrent
    Reference/Destroy calls cannot yield a torn read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Report the virtual-pixel policy currently configured on the image's
    pixel cache.
  */
  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  method=GetPixelCacheVirtualMethod(image);
  return(method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  /*
    Start from a verbatim copy of the format; the substitutions below edit
    filename in place.  canonical records whether any substitution actually
    happened.
  */
  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  /*
    Walk every '%' escape in the format string.
  */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* literal "%%"; skipped here and collapsed to '%' after the loop */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          foo;

        /* consume a zero-padded field width (e.g. "%03d"); strtol advances
           q past the digits, the parsed value itself is unused */
        foo=(ssize_t) strtol(q,&q,10);
        (void) foo;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution: temporarily terminate the format just
          past the conversion specifier, printf the prefix with value into
          filename at the escape's offset, then re-append the tail.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t)
          (MagickPathExtent-(p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        /* FUTURE: Compare update with code from InterpretImageProperties()
           Note that a 'filename:' property should not need depth recursion.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /*
          Extract the bracketed key into pattern[], honoring nested
          brackets via the depth counter.
        */
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /*
          Only "filename:..." keys are substituted here; look the value up
          as a property, then an artifact, then an image option.
        */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        /*
          Splice the option value over the "%[...]" escape: length tracks
          the cumulative size drift from earlier substitutions so the copy
          lands at the right offset in filename.
        */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),option,(size_t)
          (MagickPathExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /*
    Collapse any remaining "%%" pairs into a single '%'.
  */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
        canonical=MagickTrue;
      }
  /*
    No substitution at all: fall back to the untouched format string.
  */
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support, pixel storage is integral and clamped, so no image
    can be high dynamic range.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every inspected channel value is an
    integer within [0, QuantumRange]; it flips to MagickFalse at the first
    out-of-range or fractional value (or on a cache read failure).  The
    final return INVERTS status: MagickTrue means "is HDR".
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* pixels excluded by the write mask are not inspected */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        /*
          A channel is "HDR" when it is negative, exceeds QuantumRange, or
          carries a fractional part; break leaves i short of the channel
          count, which flags the pixel below.
        */
        pixel=(double) p[i];
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    /*
      NOTE(review): the inner x-loop has no break, so x always equals
      image->columns here and this check appears to never fire — verify.
    */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  /*
    The sequence is a valid set of image objects only when every image in
    the list carries the core signature.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MagickPathExtent],
    filename[MagickPathExtent];

  register const Image
    *p;

  /*
    The sequence is tainted when any frame is flagged as altered, or when
    any frame's magick/filename diverges from the first frame's (indicating
    the list was assembled from different sources).
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if ((p->taint != MagickFalse) ||
        (LocaleCompare(p->magick,magick) != 0) ||
        (LocaleCompare(p->filename,filename) != 0))
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Ensure *image is exclusively owned by the caller: when the reference
    count exceeds one, replace *image with a private clone and drop one
    reference from the shared original.

    Returns MagickTrue on success; MagickFalse (with details in exception)
    when the clone cannot be allocated, in which case *image and its
    reference count are left untouched.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  /*
    BUG FIX: the clone must be validated BEFORE the original's reference
    count is decremented; otherwise a failed clone leaked a reference and
    handed the caller a NULL *image.
  */
  if (clone_image == (Image *) NULL)
    return(MagickFalse);
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const PixelInfo *background,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const PixelInfo *) NULL);
  /*
    Acquire a blank canvas of the requested geometry, inheriting the
    colorspace, alpha trait, fuzz and depth of the background pixel.
  */
  image=AcquireImage(image_info,exception);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->alpha_trait=background->alpha_trait;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Queue (not read) each row since every pixel is overwritten with the
      background color.
    */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    On any cache failure the partially filled canvas is destroyed and NULL
    is returned.
  */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Take an additional reference on the image under its semaphore and hand
    the same pointer back to the caller.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Parse the page specification and fold it into the image's page canvas:
    width/height replace the canvas size, while x/y either offset (with the
    '!' aspect flag) or replace the canvas position.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* a width without a height implies a square canvas */
      image->page.width=geometry.width;
      image->page.height=((flags & HeightValue) != 0) ? geometry.height :
        geometry.width;
    }
  if ((flags & AspectValue) != 0)
    {
      /* relative placement: offsets accumulate onto the current position */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
      return(MagickTrue);
    }
  /*
    Absolute placement: offsets replace the position, and a positive offset
    on an unset canvas dimension grows the canvas to contain the image.
  */
  if ((flags & XValue) != 0)
    {
      image->page.x=geometry.x;
      if ((image->page.width == 0) && (geometry.x > 0))
        image->page.width=image->columns+geometry.x;
    }
  if ((flags & YValue) != 0)
    {
      image->page.y=geometry.y;
      if ((image->page.height == 0) && (geometry.y > 0))
        image->page.height=image->rows+geometry.y;
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o Alpha: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Enable the alpha channel before writing so the per-pixel alpha values
    take effect.
  */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* pixels excluded by the write mask keep their existing alpha */
      if (GetPixelWriteMask(image,q) != 0)
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImage method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    A translucent background requires an active alpha channel; conform the
    stored background color to this image's colorspace/traits before use.
  */
  if ((image->background_color.alpha != OpaqueAlpha) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /*
    Thin forwarder: install the new channel mask on the image's pixel
    channel map and return the previous mask.
  */
  ChannelType
    mask;

  mask=SetPixelChannelMask(image,channel_mask);
  return(mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /*
    The image adopts the colorspace, alpha trait, fuzz and depth of the
    fill color before every pixel is overwritten with it.
  */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Queue (not read) each row since every pixel is replaced.
    */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  /*
    Record the new storage class (DirectClass or PseudoClass) and let the
    pixel cache resynchronize itself to match; the cache's status is the
    function's result.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /*
    Set the image size (columns & rows) and resynchronize the pixel cache.
    Returns MagickFalse (with an exception recorded) for a zero dimension or
    an unsupported depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  /*
    Validate the depth before mutating the geometry so a failed call does not
    leave the image with a new size but an unsynchronized pixel cache.
  */
  if (image->depth > (8*sizeof(MagickSizeType)))
    ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          /* Not scenes; a geometry (e.g. [100x100+10+5]) selects a region. */
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scenes and ranges (e.g. "1,3-5"),
            recording the lowest scene and the total scene count.
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* For zlib-compressed files, use the extension under the compression
     suffix (e.g. image.jpg.gz -> jpg). */
  if (*component != '\0')
    if ((LocaleCompare(component,"gz") == 0) ||
        (LocaleCompare(component,"Z") == 0) ||
        (LocaleCompare(component,"svgz") == 0) ||
        (LocaleCompare(component,"wmz") == 0))
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  /* Likewise for bzip2-compressed files (image.tif.bz2 -> tif). */
  if (*component != '\0')
    if (LocaleCompare(component,"bz2") == 0)
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*component != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,component,MagickPathExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            /* Explicit formats are authoritative; skip content sniffing. */
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      GetPathComponent(image_info->filename,CanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      /* Honor the prefix only if a coder or delegate actually supports it. */
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireMagickMemory(magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) ResetMagickMemory(magick,0,magick_size);
      /* Peek at the leading bytes, then rewind so the real read starts at
         the beginning of the blob. */
      count=ReadBlob(image,magick_size,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Attach a caller-owned memory blob (and its length) to the image info so
    subsequent operations work on memory instead of a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->length=length;
  image_info->blob=(void *) blob;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  /*
    Register caller-supplied custom stream handlers on the image info.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->custom_stream=custom_stream;  /* types match; no cast needed */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Point the image info at an already-open stdio stream; callers pass a NULL
    file to clear the association.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
  const Image *mask,ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (mask == (const Image *) NULL)
    {
      /* A NULL mask disables the requested mask channel. */
      switch (type)
      {
        case WritePixelMask: image->write_mask=MagickFalse; break;
        default: image->read_mask=MagickFalse; break;
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case WritePixelMask: image->write_mask=MagickTrue; break;
    default: image->read_mask=MagickTrue; break;
  }
  /* Resynchronize the pixel cache so the mask channel exists before it is
     written below. */
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(mask,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity;

      /* Pixels outside the mask's extent get an intensity of 0. */
      intensity=0;
      if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
        intensity=GetPixelIntensity(mask,p);
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelWriteMask(image,ClampToQuantum(intensity),q);
          break;
        }
        default:
        {
          SetPixelReadMask(image,ClampToQuantum(intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o region: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
  const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask as defined by the region.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /* A NULL region disables the requested mask channel. */
      switch (type)
      {
        case WritePixelMask: image->write_mask=MagickFalse; break;
        default: image->read_mask=MagickFalse; break;
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case WritePixelMask: image->write_mask=MagickTrue; break;
    default: image->read_mask=MagickTrue; break;
  }
  /* Resynchronize the pixel cache so the mask channel exists before it is
     written below. */
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* Inside the rectangle the mask is fully set; outside it is 0. */
      pixel=0;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=QuantumRange;
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  /*
    Delegate to the pixel cache: install the new virtual-pixel method and
    hand back whatever SetPixelCacheVirtualMethod() reports as the previous
    setting.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SmushXGap() measures how far the current image can slide left toward its
  previous neighbor: for each row it counts the transparent trailing columns
  of the left image plus the transparent leading columns of the right image,
  and keeps the minimum over all rows.  The return value is that gap minus
  the requested minimum offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  if (images->previous == (Image *) NULL)
    return(0);  /* first image in the list: nothing to smush against */
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Count transparent columns at the right edge of the left image. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Count transparent columns at the left edge of the right image. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the outer loop has no break, so y == smush_image->rows
     here and this early return looks unreachable — confirm intent. */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
/*
  SmushYGap() is the vertical analogue of SmushXGap(): for each column it
  counts the transparent bottom rows of the top image plus the transparent
  top rows of the bottom image, keeps the minimum over all columns, and
  returns that gap minus the requested minimum offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);  /* first image in the list: nothing to smush against */
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Count transparent rows at the bottom edge of the top image. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Count transparent rows at the top edge of the bottom image. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): the outer loop has no break, so x == smush_image->columns
     here and this early return looks unreachable — confirm intent. */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Stacked top-to-bottom: widest column, heights accumulate. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Side-by-side: widths accumulate, tallest row wins. */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    /* Pull each image back by its measured transparent gap before
       compositing, so neighbors overlap their transparent borders. */
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually covered after gap removal. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Strip the image of all profiles and comment-style properties.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) exception;  /* unused; kept for API symmetry */
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /*
    Ask the PNG encoder to omit ancillary chunks that would reintroduce
    metadata.
  */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,cHRM,EXIF,gAMA,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PushColormapIndex() validates a colormap index: out-of-range indexes raise
  the range_exception flag and map to colormap entry 0; in-range indexes pass
  through unchanged.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((Quantum) 0);
    }
  return(index);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);  /* ping images carry no pixels to synchronize */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);  /* only colormapped images have indexes to expand */
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* Preserve the taint flag: syncing is not a user-visible modification. */
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Replace each pixel with its (validated) colormap entry. */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
% that operations and coders can find such settings. In IMv7 if a desired
% per-image artifact is not set, then it will directly look for a global
% option as a fallback, as such this copy is no longer needed, only the
% link set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  /*
    Apply SyncImageSettings() to every image in the list, then drop the
    one-shot "page" option so it does not leak into later operations.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (image=images; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
/*
  Copy each recognized global image option from image_info into the
  corresponding per-image attribute of 'image'.  Options that are not set
  leave the image attribute untouched.  Always returns MagickTrue.
*/
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->background_color,
exception);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
/* If only one coordinate is given, reuse it for both x and y. */
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->border_color,
exception);
/* FUTURE: do not sync compose to per-image compose setting here */
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
/* -- */
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
/* A single density value applies to both axes. */
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
/* Fuzz may be given as a percentage of the quantum range. */
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
/* An explicit image_info->quality overrides the "quality" option above. */
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
/*
  Resolve the resolution units: the "units" option wins over
  image_info->units.  When the units actually change, rescale the stored
  resolution between pixels-per-inch and pixels-per-centimeter (the
  cm->inch direction is rounded to two decimal places).
*/
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->resolution.x=(double) ((size_t) (100.0*2.54*
image->resolution.x+0.5))/100.0;
image->resolution.y=(double) ((size_t) (100.0*2.54*
image->resolution.y+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
}
option=GetImageOption(image_info,"virtual-pixel");
if (option != (const char *) NULL)
(void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
exception);
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
/*
  Keep a clone of image_info on the image so later per-image artifact
  lookups can fall back to the global option settings/defines.  This
  avoids duplicating every global option into per-image artifacts, while
  ensuring only explicitly set per-image artifacts are preserved when a
  parenthesized image-list scope ends.
*/
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
image->image_info=CloneImageInfo(image_info);
return(MagickTrue);
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices:
% The vertex nearest the origin in RGB space and the vertex farthest from
% the origin.
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of
% pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typdef declarations.
*/
/*
  A pixel whose channels are MagickRealType: channel sums and error terms
  can be accumulated without Quantum clamping or overflow.
*/
typedef struct _RealPixelPacket
{
MagickRealType
red,
green,
blue,
opacity;
} RealPixelPacket;
/*
  One node of the color-description tree.  A node has up to 16 children:
  ColorToNodeId() produces a 3-bit RGB child id, plus a 4th (alpha) bit
  when the cube associates alpha.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
/* Count of pixels classified exactly at this node ("n2" in the header
   commentary). */
MagickSizeType
number_unique;
/* Channel sums for the pixels counted in number_unique (Sr/Sg/Sb). */
RealPixelPacket
total_color;
/* Accumulated squared distance from the node center (E). */
MagickRealType
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
/*
  A chained slab of NodeInfo structures (see NodesInAList): nodes are
  handed out from these arrays and the whole chain can be released at
  once when the cube is destroyed.
*/
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
/*
  Working state for color quantization: the color-description tree plus
  bookkeeping for classification, reduction, and assignment.
*/
typedef struct _CubeInfo
{
/* Root of the color-description tree. */
NodeInfo
*root;
/* Colors currently defined vs. the requested maximum. */
size_t
colors,
maximum_colors;
/* Colormap slot reserved for fully transparent pixels (-1 when none). */
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
/* Target color and best distance so far for ClosestColor() searches. */
RealPixelPacket
target;
MagickRealType
distance,
pruning_threshold,
next_threshold;
/* Node accounting for the slab allocator and colormap numbering. */
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
ssize_t
*cache;
/* Error-diffusion history and weights -- presumably consumed by
   DitherImage(); confirm against that implementation. */
RealPixelPacket
error[ErrorQueueLength];
MagickRealType
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
/* When set, the alpha channel participates in classification. */
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(const Image *,CubeInfo *,const NodeInfo *),
PruneToCubeDepth(const Image *,CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
/*
  Allocate and initialize a QuantizeInfo structure.  When image_info is
  non-NULL, its dither flag, its "dither" image option, and its verbose
  flag seed the new structure.  Throws a fatal exception when memory is
  exhausted.
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
  if (quantize_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  quantize_info->dither=image_info->dither;
  {
    const char
      *dither_option;

    dither_option=GetImageOption(image_info,"dither");
    if (dither_option != (const char *) NULL)
      quantize_info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,dither_option);
  }
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  Load a pixel into a RealPixelPacket.  When the cube associates alpha
  and the pixel is not fully opaque, the color channels are premultiplied
  by the pixel's alpha; the opacity channel is always copied unscaled.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,RealPixelPacket *alpha_pixel)
{
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->opacity != OpaqueOpacity))
    {
      MagickRealType
        scale;

      scale=(MagickRealType) (QuantumScale*(QuantumRange-
        GetPixelOpacity(pixel)));
      alpha_pixel->red=scale*GetPixelRed(pixel);
      alpha_pixel->green=scale*GetPixelGreen(pixel);
      alpha_pixel->blue=scale*GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
  alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
  alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
/*
  Clamp a real value to [0, QuantumRange] and round to the nearest
  Quantum.
*/
static inline Quantum ClampToUnsignedQuantum(const MagickRealType value)
{
  if (value >= QuantumRange)
    return((Quantum) QuantumRange);
  if (value <= 0.0)
    return((Quantum) 0);
  return((Quantum) (value+0.5));
}
/*
  Map a pixel to a child-node id at a given tree level: bit 'index' of
  each 8-bit channel contributes one bit of the id (red -> bit 0,
  green -> bit 1, blue -> bit 2, and, when alpha is associated,
  opacity -> bit 3), yielding an id in 0..7 or 0..15.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const RealPixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampToUnsignedQuantum(
GetPixelRed(pixel))) >> index) & 0x01) | ((ScaleQuantumToChar(
ClampToUnsignedQuantum(GetPixelGreen(pixel))) >> index) & 0x01) << 1 |
((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelBlue(pixel))) >>
index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelOpacity(pixel))) >>
index) & 0x1) << 3;
return(id);
}
/*
  Return MagickTrue when two pixels have identical RGB channels and,
  for matte images, identical opacity as well.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if (GetPixelRed(p) != GetPixelRed(q))
    return(MagickFalse);
  if (GetPixelGreen(p) != GetPixelGreen(q))
    return(MagickFalse);
  if (GetPixelBlue(p) != GetPixelBlue(q))
    return(MagickFalse);
  if (image->matte == MagickFalse)
    return(MagickTrue);
  return(GetPixelOpacity(p) == GetPixelOpacity(q) ? MagickTrue :
    MagickFalse);
}
/*
  Build the output colormap from the pruned tree and remap every pixel to
  its closest colormap entry (or hand off to DitherImage() when dithering
  is enabled).  Returns MagickTrue on success; throws a binary exception
  when the colormap cannot be allocated.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ssize_t
y;
/*
Allocate image colormap.
*/
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace);
else
if ((image->colorspace != GRAYColorspace) &&
(IsRGBColorspace(image->colorspace) == MagickFalse) &&
(image->colorspace != CMYColorspace))
(void) TransformImageColorspace((Image *) image,RGBColorspace);
if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/* DefineImageColormap() repopulates image->colors as it walks the tree. */
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
/* Each row gets a private copy of the cube so the parallel search
   state (target, distance, color_number) does not race. */
CubeInfo
cube;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
RealPixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
/* Run-length optimization: classify a run of identical pixels once. */
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
/* Seed the search with a distance larger than any possible 4-channel
   squared distance. */
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AssignImageColors)
#endif
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
(cube_info->quantize_info->colorspace == GRAYColorspace))
{
Quantum
intensity;
register PixelPacket
*restrict q;
register ssize_t
i;
/*
Monochrome image.
*/
/* Snap each colormap entry to pure black or pure white. */
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
intensity=(Quantum) (PixelIntensity(q) < ((MagickRealType)
QuantumRange/2.0) ? 0 : QuantumRange);
SetPixelRed(q,intensity);
SetPixelGreen(q,intensity);
SetPixelBlue(q,intensity);
q++;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,RGBColorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color.  It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
/*
  Decide whether the alpha channel participates in quantization: it does
  for matte images, except in the TransparentColorspace case and the
  two-color grayscale (monochrome) case.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->matte;
  if ((cube_info->quantize_info->colorspace == TransparentColorspace) ||
      ((cube_info->quantize_info->number_colors == 2) &&
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
  CacheView
    *image_view;
  MagickBooleanType
    proceed;
  MagickRealType
    bisect;
  NodeInfo
    *node_info;
  RealPixelPacket
    error,
    mid,
    midpoint,
    pixel;
  size_t
    count,
    id,
    index,
    level;
  ssize_t
    y;

  /*
    Build the color description tree: each pixel descends from the root,
    creating nodes on demand and accumulating pixel counts, color sums and
    quantization error.  The first loop classifies to the full MaxTreeDepth;
    once the color count exceeds cube_info->maximum_colors the tree is
    pruned and the second loop continues at the reduced cube_info->depth.
    Fix vs. original: on GetNodeInfo() allocation failure we now `continue`
    after reporting the exception instead of dereferencing the NULL child.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if ((image->colorspace != GRAYColorspace) &&
        (image->colorspace != CMYColorspace) &&
        (IsRGBColorspace(image->colorspace) == MagickFalse))
      (void) TransformImageColorspace((Image *) image,RGBColorspace);
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree; a run of
        identical pixels is classified once with multiplicity `count'.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;  /* do not dereference the NULL child below */
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(image,cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: continue classifying the remaining rows, but only to the
    (possibly pruned) cube_info->depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;  /* do not dereference the NULL child below */
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,RGBColorspace);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone;

  /*
    Allocate and default-initialize the clone; on allocation failure this
    raises a fatal exception.  A NULL source yields a fresh default
    structure, otherwise the quantization settings are copied over.
  */
  clone=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone));
  if (clone == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone);
  clone->number_colors=quantize_info->number_colors;
  clone->tree_depth=quantize_info->tree_depth;
  clone->dither=quantize_info->dither;
  clone->dither_method=quantize_info->dither_method;
  clone->colorspace=quantize_info->colorspace;
  clone->measure_error=quantize_info->measure_error;
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;
  size_t
    number_children;
  /*
    Depth-first search of the color cube: recurse into every allocated
    child, then test this node's colormap entry (if it owns unique pixels)
    against the best match recorded in cube_info->distance/color_number.
    The target color to match is cube_info->target.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;
      register MagickRealType
        alpha,
        beta,
        distance;
      register PixelPacket
        *restrict p;
      register RealPixelPacket
        *restrict q;
      /*
        Determine if this color is "closest".  Each component difference is
        weighted by its normalized alpha; the checks are nested so the
        squared-distance accumulation stops as soon as the partial sum
        already exceeds the current best distance.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  /* alpha term; zero when opacity is not associated */
                  pixel=alpha-beta;
                  distance+=pixel*pixel;
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;
  /*
    Re-quantize a palette image to its current color count at full tree
    depth, which folds duplicate and unused colormap entries together.
    Returns MagickFalse when the image is not a palette image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry.  A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.  DefineImageColormap() returns the
% number of colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;
  size_t
    number_children;
  /*
    Depth-first traversal: recurse into every allocated child, then emit a
    colormap entry for any node that owns unique pixels.  The entry is the
    mean color of the pixels accumulated in node_info->total_color, and the
    node records which colormap slot it was assigned (color_number).
    Returns the running image->colors count.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;
      register PixelPacket
        *restrict q;
      /*
        Colormap entry is defined by the mean color in this cube.
        alpha is 1/number_unique, guarded against a zero divisor.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 1.0 : alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          /* No alpha channel: plain mean, fully opaque. */
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;
          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              MagickRealType
                gamma;
              /*
                Partially transparent mean color: un-premultiply by the
                mean coverage (gamma), guarded against a zero divisor.
              */
              gamma=(MagickRealType) (QuantumScale*(QuantumRange-
                (MagickRealType) q->opacity));
              gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (
                alpha*gamma*QuantumRange*node_info->total_color.blue)));
              /* Track the most-populated transparent entry for reuse. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *next;

  /*
    Walk the node queue, releasing each page of pooled NodeInfo storage,
    then the dither cache, the cloned quantize info, and the cube itself.
  */
  do
  {
    next=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=next;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->cache != (ssize_t *) NULL)
    cube_info->cache=(ssize_t *) RelinquishMagickMemory(cube_info->cache);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Invalidate the structure signature and release it; always returns NULL
    so the caller can overwrite its pointer in one statement.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  quantize_info->signature=(~MagickSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color-reduced image to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static RealPixelPacket **DestroyPixelThreadSet(RealPixelPacket **pixels)
{
  register ssize_t
    i;

  /*
    Release each per-thread error-diffusion buffer, then the pointer array
    itself.  Always returns NULL for pointer-clearing assignment.
  */
  assert(pixels != (RealPixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
  {
    if (pixels[i] == (RealPixelPacket *) NULL)
      continue;
    pixels[i]=(RealPixelPacket *) RelinquishMagickMemory(pixels[i]);
  }
  pixels=(RealPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static RealPixelPacket **AcquirePixelThreadSet(const size_t count)
{
  RealPixelPacket
    **pixels;
  register ssize_t
    i;
  size_t
    number_threads;

  /*
    Allocate a zero-initialized pointer table with one slot per worker
    thread, then give each thread a buffer of 2*count pixels (two scanline
    rows of error terms).  On any failure the partially built set is torn
    down and NULL is returned.
  */
  number_threads=GetOpenMPMaximumThreads();
  pixels=(RealPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (RealPixelPacket **) NULL)
    return((RealPixelPacket **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(RealPixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixels));
    if (pixels[i] == (RealPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const RealPixelPacket *pixel)
{
/*
  Map a pixel to its slot in the closest-color cache: each channel is
  quantized to its top (8-CacheShift) bits and the per-channel fields are
  packed into one integer key.  Opacity participates only when alpha is
  associated.
*/
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
  ssize_t
    offset;
  offset=(ssize_t)
    (RedShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->red))) |
     GreenShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->green))) |
     BlueShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampToUnsignedQuantum(
      pixel->opacity)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  RealPixelPacket
    **pixels;
  ssize_t
    y;

  /*
    Distribute quantization error using serpentine-scan Floyd-Steinberg.
    Fixes vs. original: the pixel cache is now synchronized and progress is
    reported once per row (not once per pixel), the inner node id no longer
    shadows the thread id, and the accumulated status is returned instead
    of a hard-coded MagickTrue so cache failures reach the caller.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (RealPixelPacket **) NULL)
    return(MagickFalse);
  exception=(&image->exception);
  status=MagickTrue;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    CubeInfo
      cube;
    RealPixelPacket
      *current,
      *previous;
    register IndexPacket
      *restrict indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    size_t
      index;
    ssize_t
      v;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    cube=(*cube_info);
    /*
      Alternate the two error rows; v is the scan direction (odd rows run
      right-to-left).
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      RealPixelPacket
        color,
        pixel;
      register ssize_t
        i;
      ssize_t
        u;
      u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(&cube,q+u,&pixel);
      /*
        Gather diffused error from the 7/16, 1/16, 5/16 and 3/16 neighbors.
      */
      if (x > 0)
        {
          pixel.red+=7*current[u-v].red/16;
          pixel.green+=7*current[u-v].green/16;
          pixel.blue+=7*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=7*current[u-v].opacity/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=previous[u+v].opacity/16;
            }
          pixel.red+=5*previous[u].red/16;
          pixel.green+=5*previous[u].green/16;
          pixel.blue+=5*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=5*previous[u].opacity/16;
          if (x > 0)
            {
              pixel.red+=3*previous[u-v].red/16;
              pixel.green+=3*previous[u-v].green/16;
              pixel.blue+=3*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=3*previous[u-v].opacity/16;
            }
        }
      pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red);
      pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green);
      pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;
          register size_t
            node_id;
          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
            1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(indexes+u,index);
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q+u,image->colormap+index);
          if (cube.associate_alpha != MagickFalse)
            SetPixelOpacity(q+u,image->colormap[index].opacity);
        }
      /*
        Store the error.
      */
      AssociateAlphaPixel(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].opacity=pixel.opacity-color.opacity;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FloydSteinbergDither)
#endif
        proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  return(status);
}
static MagickBooleanType
  RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);

/*
  Riemersma() recursively generates a Hilbert space-filling curve of the
  given level over the image.  Each RiemersmaDither() call dithers the pixel
  at the cube's current (x,y) position and then steps one pixel in the given
  direction; directions are encoded with the GravityType constants
  (West/East/North/South).  At level 1 the base "U" motif is emitted; at
  higher levels four rotated/reflected sub-curves are stitched together with
  connecting steps.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction)
{
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
  MagickBooleanType
    proceed;
  RealPixelPacket
    color,
    pixel;
  register CubeInfo
    *p;
  size_t
    index;
  /*
    Dither the single pixel at the cube's current (x,y) -- if it lies inside
    the image -- then step one pixel in `direction'.  Error from the last
    ErrorQueueLength pixels visited along the Hilbert curve is diffused into
    the current pixel using the exponentially decaying cube weights.
  */
  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;
      register IndexPacket
        *restrict indexes;
      register PixelPacket
        *restrict q;
      register ssize_t
        i;
      /*
        Distribute error.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red);
      pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green);
      pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;
          register size_t
            id;
          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          node_info=node_info->parent;
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) (1*p->cache[i]);
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Advance the curve position one pixel in the requested direction. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  /*
    Return the larger of two signed sizes.
  */
  return(x > y ? x : y);
}
static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  /*
    Return the smaller of two signed sizes.
  */
  return(x < y ? x : y);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  register ssize_t
    i;
  size_t
    depth;

  /*
    Floyd-Steinberg handles every dither method other than Riemersma.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve.  Reset the error
    queue and starting position, then find the curve order: the smallest
    power of two that covers the larger image dimension.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (i != 0)
  {
    i>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireCacheView(image);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initialize the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose a optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;
  MagickRealType
    sum,
    weight;
  register ssize_t
    i;
  size_t
    length;
  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /* Clamp the requested tree depth to the range [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.  The root is its own parent so upward traversals
    terminate safely.
    NOTE(review): cube_info is not released on this failure path -- confirm
    whether the leak is intended.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot for every quantized RGBA
    key produced by CacheOffset() ((8-CacheShift) bits per channel).
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->cache=(ssize_t *) AcquireQuantumMemory(length,
    sizeof(*cube_info->cache));
  if (cube_info->cache == (ssize_t *) NULL)
    return((CubeInfo *) NULL);
  /*
    Initialize color cache.  -1 marks "closest color not yet computed".
  */
  for (i=0; i < (ssize_t) length; i++)
    cube_info->cache[i]=(-1);
  /*
    Distribute weights along a curve of exponential decay: the most recent
    error entry (highest index) gets the largest weight.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=1.0/weight;
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  /* Fold any rounding slack into the oldest weight so the sum is exactly 1. */
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Replenish the node allocator with a fresh page when it runs dry.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *pool;

      pool=(Nodes *) AcquireMagickMemory(sizeof(*pool));
      if (pool == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      pool->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*pool->nodes));
      if (pool->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      /*
        Push the new page onto the cube's page list so it is released with
        the cube.
      */
      pool->next=cube_info->node_queue;
      cube_info->node_queue=pool;
      cube_info->next_node=pool->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Carve the next node from the current page and preset its fields.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  /*
    Measure the difference between the original and quantized image by
    accumulating, per channel, the absolute colormap error of every pixel.
    Results are stored in image->error; DirectClass images report zero error.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /*
      Fix: use the *virtual* index queue to match the virtual pixels acquired
      above; the authentic queue is only valid after an authentic pixel
      request and previously yielded indexes for the wrong region.
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /*
            Weight each sample by its normalized opacity so transparent
            pixels contribute proportionally less error.
          */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs(alpha*GetPixelRed(p)-beta*image->colormap[index].red);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelGreen(p)-beta*image->colormap[index].green);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelBlue(p)-beta*image->colormap[index].blue);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Guard against a 0x0 image: the error fields were already reset to zero,
    so skip the divisions rather than produce NaN.
  */
  if (area != 0.0)
    {
      image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
      image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
        mean_error/area;
    }
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Initialize quantize_info to its documented defaults (256 colors,
    Riemersma dithering, undefined colorspace, no error measurement).
    Fix: validate the pointer *before* doing anything else, matching the
    assert-first convention used by every other function in this file.
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
static inline ssize_t MagickRound(MagickRealType x)
{
  /*
    Round x to the nearest integer, halfway cases away from zero.
  */
  return((ssize_t) (x < 0.0 ? x-0.5 : x+0.5));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  /*
    Posterize all default channels by delegating to the channel-aware
    variant.
  */
  return(PosterizeImageChannel(image,DefaultChannels,levels,dither));
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
/*
  Snap a quantum to the nearest of `levels` evenly spaced levels; the
  MagickMax() guard avoids division by zero when levels <= 1.
*/
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
  For colormapped images, posterize the colormap entries first.
  NOTE(review): this pragma lists `progress` and `status` as shared before
  either is initialized (both are assigned only before the second loop).
  Harmless here since this loop uses neither, but worth confirming.
*/
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((channel & RedChannel) != 0)
image->colormap[i].red=PosterizePixel(image->colormap[i].red);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=PosterizePixel(image->colormap[i].green);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
/* A failure in any thread/row short-circuits the remaining rows. */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Posterize each selected channel of this pixel in place. */
if ((channel & RedChannel) != 0)
SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
if (((channel & OpacityChannel) != 0) &&
(image->matte == MagickTrue))
SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
proceed=SetImageProgress(image,PosterizeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
  Finally reduce the posterized image to at most levels^3 colors (capped at
  MaxColormapSize+1) via QuantizeImage.
*/
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither=dither;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  NodeInfo
    *node_parent;

  register ssize_t
    i;

  size_t
    count;

  /*
    Prune every descendant first so their statistics bubble upward.
  */
  count=(cube_info->associate_alpha != MagickFalse) ? 16UL : 8UL;
  for (i=0; i < (ssize_t) count; i++)
  {
    if (node_info->child[i] == (NodeInfo *) NULL)
      continue;
    PruneChild(image,cube_info,node_info->child[i]);
  }
  /*
    Fold this node's color statistics into its parent, then unlink it from
    the tree and decrement the live-node count.
  */
  node_parent=node_info->parent;
  node_parent->number_unique+=node_info->number_unique;
  node_parent->total_color.red+=node_info->total_color.red;
  node_parent->total_color.green+=node_info->total_color.green;
  node_parent->total_color.blue+=node_info->total_color.blue;
  node_parent->total_color.opacity+=node_info->total_color.opacity;
  node_parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    count;

  /*
    Recurse into every child before considering this node.
  */
  count=(cube_info->associate_alpha != MagickFalse) ? 16UL : 8UL;
  for (i=0; i < (ssize_t) count; i++)
  {
    if (node_info->child[i] == (NodeInfo *) NULL)
      continue;
    PruneLevel(image,cube_info,node_info->child[i]);
  }
  /*
    Merge bottom-level nodes into their parents.
  */
  if (node_info->level == cube_info->depth)
    PruneChild(image,cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    count;

  /*
    Recurse into every child before considering this node.
  */
  count=(cube_info->associate_alpha != MagickFalse) ? 16UL : 8UL;
  for (i=0; i < (ssize_t) count; i++)
  {
    if (node_info->child[i] == (NodeInfo *) NULL)
      continue;
    PruneToCubeDepth(image,cube_info,node_info->child[i]);
  }
  /*
    Merge any node deeper than the cube's configured depth into its parent.
  */
  if (node_info->level > cube_info->depth)
    PruneChild(image,cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
*/
static MagickBooleanType DirectToColormapImage(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
number_colors;
ssize_t
y;
/*
  Convert a small image to PseudoClass by giving every pixel its own
  colormap entry (one entry per pixel, columns*rows total); callers ensure
  the pixel count fits within the colormap limits.
*/
status=MagickTrue;
number_colors=(size_t) (image->columns*image->rows);
if (AcquireImageColormap(image,number_colors) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->colors != number_colors)
return(MagickFalse);
i=0;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
proceed;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Copy the pixel into colormap slot i and point the pixel's index at it. */
image->colormap[i].red=GetPixelRed(q);
image->colormap[i].green=GetPixelGreen(q);
image->colormap[i].blue=GetPixelBlue(q);
image->colormap[i].opacity=GetPixelOpacity(q);
SetPixelIndex(indexes+x,i);
i++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
/*
  NOTE(review): a MagickFalse `proceed` flags failure in `status` but does
  not break the loop -- confirm this is the intended cancel semantics.
*/
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Clamp the requested colormap size to [1,MaxColormapSize].
  */
  maximum_colors=quantize_info->number_colors;
  if ((maximum_colors == 0) || (maximum_colors > MaxColormapSize))
    maximum_colors=MaxColormapSize;
  /*
    Short-circuit the easy cases: few enough pixels for a direct colormap,
    mattless grayscale, or an image already within the color budget.
  */
  if ((image->columns*image->rows) <= maximum_colors)
    (void) DirectToColormapImage(image,&image->exception);
  if ((IsGrayImage(image,&image->exception) != MagickFalse) &&
      (image->matte == MagickFalse))
    (void) SetGrayscaleImage(image);
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    return(MagickTrue);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2, trimmed when
        dithering or a matte channel leaves less headroom.
      */
      for (colors=maximum_colors, depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;
    }
  /*
    Classify, reduce, and assign colors via the color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
register ssize_t
i;
size_t
depth,
maximum_colors,
number_images;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images);
return(status);
}
status=MagickFalse;
/*
  Clamp the requested colormap size to [1,MaxColormapSize].
*/
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither != MagickFalse)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(&images->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
/*
  Pass 1: classify the colors of every image in the sequence into one
  shared color cube.  Each image's progress monitor is suspended during
  classification and restored afterward so only sequence-level progress
  is reported.
*/
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
/*
  Pass 2: assign the shared, reduced colormap to every image in the
  sequence, again suspending per-image progress monitors.
*/
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(const Image *image,CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    count;

  /*
    Recurse into every child before considering this node.
  */
  count=(cube_info->associate_alpha != MagickFalse) ? 16UL : 8UL;
  for (i=0; i < (ssize_t) count; i++)
  {
    if (node_info->child[i] == (NodeInfo *) NULL)
      continue;
    Reduce(image,cube_info,node_info->child[i]);
  }
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(image,cube_info,node_info);
  else
    {
      /*
        Node survives this pass: count it as a surviving color and track
        the smallest surviving error as the next pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  /*
    Repeatedly prune minimal-error nodes until the surviving color count
    fits the requested maximum.
  */
  span=cube_info->colors;
  cube_info->next_threshold=0.0;
  while (cube_info->colors > cube_info->maximum_colors)
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(image,cube_info,cube_info->root);
    /*
      Report progress as the number of colors eliminated so far.
    */
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickSignature);
  /*
    Build a full-depth color cube from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Replace each target pixel with its closest reference color.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        No reference image: derive a global colormap from the sequence
        itself.
      */
      return(QuantizeImages(quantize_info,images));
    }
  /*
    Build a full-depth color cube from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference colors to every image in the sequence.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      for (image=images; image != (Image *) NULL; )
      {
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  const PixelPacket
    *color_1,
    *color_2;

  ssize_t
    intensity;

  /*
    qsort() callback: order PixelPackets by ascending pixel intensity.
    Fix: the raw intensity difference is reduced to its sign before
    returning -- casting an ssize_t difference straight to int can
    overflow/truncate (and even flip sign) when ssize_t is wider than int
    and the quantum depth is large.  qsort() only requires the sign.
  */
  color_1=(const PixelPacket *) x;
  color_2=(const PixelPacket *) y;
  intensity=(ssize_t) PixelIntensityToQuantum(color_1)-(ssize_t)
    PixelIntensityToQuantum(color_2);
  return(intensity < 0 ? -1 : intensity > 0 ? 1 : 0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  SetGrayscaleImage() converts an image to a PseudoClass grayscale image:
  it builds (or reuses) a colormap keyed on pixel intensity, sorts the map
  by intensity, merges duplicate entries, and remaps every pixel index.
  Returns MagickTrue on success.

  Fixes over the previous revision: colormap_index is released on the two
  mid-function allocation-failure paths (it used to leak), and the inner
  declaration of `exception` that shadowed the function-scope one is gone.
*/
static MagickBooleanType SetGrayscaleImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  PixelPacket
    *colormap;

  register ssize_t
    i;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace);
  /*
    colormap_index[] maps a ScaleQuantumToMap() intensity to a colormap slot.
  */
  colormap_index=(ssize_t *) AcquireQuantumMemory(MaxMap+1,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass image: discover the distinct gray levels and populate the
        colormap as they are first encountered.  -1 marks "not yet seen".
      */
      for (i=0; i <= (ssize_t) MaxMap; i++)
        colormap_index[i]=(-1);
      if (AcquireImageColormap(image,MaxMap+1) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *restrict indexes;

        register const PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(q));
          if (colormap_index[intensity] < 0)
            {
              /*
                Double-checked inside the critical section: another thread may
                have registered this intensity since the unguarded test above.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=GetPixelRed(q);
                  image->colormap[image->colors].green=GetPixelGreen(q);
                  image->colormap[image->colors].blue=GetPixelBlue(q);
                  image->colors++;
                }
            }
          SetPixelIndex(indexes+x,colormap_index[intensity]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Remember each entry's original slot in the (otherwise unused) opacity
    channel so the sort below can be undone when rebuilding the index map.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(unsigned short) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
    IntensityCompare);
  colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
    sizeof(*colormap));
  if (colormap == (PixelPacket *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Merge runs of identical colors in the sorted map into single entries.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].opacity]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Remap every pixel index through the compacted colormap.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
      SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
        indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (IsMonochromeImage(image,&image->exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
canny_edge_openmp.c | /*
gcc -o canny_edge canny_edge.c hysteresis.c pgm_io.c -lm
(Note: You can also use optimization such as -O3)
The resulting program, canny_edge, will process images in the PGM format.
Parameter selection is left up to the user. A broad range of parameters to
use as a starting point are: sigma 0.60-2.40, tlow 0.20-0.50 and,
thigh 0.60-0.90.
If you are using a Unix system, PGM file format conversion tools can be found
at ftp://wuarchive.wustl.edu/graphics/graphics/packages/pbmplus/.
Otherwise, it would be easy for anyone to rewrite the image I/O procedures
because they are listed in the separate file pgm_io.c.
If you want to check your compiled code, you can download grey-scale and edge
images from http://marathon.csee.usf.edu/edge/edge_detection.html. You can use
the parameters given in the edge filenames and check whether the edges that
are output from your program match the edge images posted at that address.
*/
/*******************************************************************************
* --------------------------------------------
*(c) 2001 University of South Florida, Tampa
* Use, or copying without permission prohibited.
* PERMISSION TO USE
* In transmitting this software, permission to use for research and
* educational purposes is hereby granted. This software may be copied for
* archival and backup purposes only. This software may not be transmitted
* to a third party without prior permission of the copyright holder. This
* permission may be granted only by Mike Heath or Prof. Sudeep Sarkar of
* University of South Florida (sarkar@csee.usf.edu). Acknowledgment as
* appropriate is respectfully requested.
*
* Heath, M., Sarkar, S., Sanocki, T., and Bowyer, K. Comparison of edge
* detectors: a methodology and initial study, Computer Vision and Image
* Understanding 69 (1), 38-54, January 1998.
* Heath, M., Sarkar, S., Sanocki, T. and Bowyer, K.W. A Robust Visual
* Method for Assessing the Relative Performance of Edge Detection
* Algorithms, IEEE Transactions on Pattern Analysis and Machine
* Intelligence 19 (12), 1338-1359, December 1997.
* ------------------------------------------------------
*
* PROGRAM: canny_edge
* PURPOSE: This program implements a "Canny" edge detector. The processing
* steps are as follows:
*
* 1) Convolve the image with a separable gaussian filter.
* 2) Take the first derivatives dx and dy using [-1,0,1] and [1,0,-1]'.
* 3) Compute the magnitude: sqrt(dx*dx+dy*dy).
* 4) Perform non-maximal suppression.
* 5) Perform hysteresis.
*
* The user must input three parameters. These are as follows:
*
* sigma = The standard deviation of the gaussian smoothing filter.
* tlow = Specifies the low value to use in hysteresis. This is a
* fraction (0-1) of the computed high threshold edge strength value.
* thigh = Specifies the high value to use in hysteresis. This fraction (0-1)
* specifies the percentage point in a histogram of the gradient of
* the magnitude. Magnitude values of zero are not counted in the
* histogram.
*
* NAME: Mike Heath
* Computer Vision Laboratory
* University of South Florida
* heath@csee.usf.edu
*
* DATE: 2/15/96
*
* Modified: 5/17/96 - To write out a floating point RAW headerless file of
* the edge gradient "up the edge" where the angle is
* defined in radians counterclockwise from the x direction.
* (Mike Heath)
*******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "hysteresis.c"
#include "pgm_io.c"
#define VERBOSE 0
#define BOOSTBLURFACTOR 90.0
int read_pgm_image(char *infilename, unsigned char **image, int *rows,
int *cols);
int write_pgm_image(char *outfilename, unsigned char *image, int rows,
int cols, char *comment, int maxval);
void canny(unsigned char *image, int rows, int cols, float sigma,
float tlow, float thigh, unsigned char **edge, char *fname);
void gaussian_smooth(unsigned char *image, int rows, int cols, float sigma,
short int **smoothedim);
void make_gaussian_kernel(float sigma, float **kernel, int *windowsize);
void derrivative_x_y(short int *smoothedim, int rows, int cols,
short int **delta_x, short int **delta_y);
void magnitude_x_y(short int *delta_x, short int *delta_y, int rows, int cols,
short int **magnitude);
void apply_hysteresis(short int *mag, unsigned char *nms, int rows, int cols,
float tlow, float thigh, unsigned char *edge);
void radian_direction(short int *delta_x, short int *delta_y, int rows,
int cols, float **dir_radians, int xdirtag, int ydirtag);
double angle_radians(double x, double y);
/*
 * Entry point: parse <image> <sigma> <tlow> <thigh> [writedirim], read the
 * PGM input, run the Canny detector, and write the edge image.  Uses
 * snprintf() (not sprintf()) so a long input filename cannot overflow the
 * fixed-size name buffers, and frees the image buffers before exiting.
 */
int main(int argc, char *argv[])
{
   char *infilename = NULL;  /* Name of the input image */
   char *dirfilename = NULL; /* Name of the output gradient direction image */
   char outfilename[128];    /* Name of the output "edge" image */
   char composedfname[128];  /* Name of the output "direction" image */
   unsigned char *image;     /* The input image */
   unsigned char *edge;      /* The output edge image */
   int rows, cols;           /* The dimensions of the image. */
   float sigma,              /* Standard deviation of the gaussian kernel. */
	 tlow,               /* Fraction of the high threshold in hysteresis. */
	 thigh;              /* High hysteresis threshold control. The actual
			        threshold is the (100 * thigh) percentage point
			        in the histogram of the magnitude of the
			        gradient image that passes non-maximal
			        suppression. */

   /****************************************************************************
   * Get the command line arguments.
   ****************************************************************************/
   if(argc < 5){
      fprintf(stderr,"\n<USAGE> %s image sigma tlow thigh [writedirim]\n",argv[0]);
      fprintf(stderr,"\n      image:      An image to process. Must be in ");
      fprintf(stderr,"PGM format.\n");
      fprintf(stderr,"      sigma:      Standard deviation of the gaussian");
      fprintf(stderr," blur kernel.\n");
      fprintf(stderr,"      tlow:       Fraction (0.0-1.0) of the high ");
      fprintf(stderr,"edge strength threshold.\n");
      fprintf(stderr,"      thigh:      Fraction (0.0-1.0) of the distribution");
      fprintf(stderr," of non-zero edge\n                  strengths for ");
      fprintf(stderr,"hysteresis. The fraction is used to compute\n");
      fprintf(stderr,"                  the high edge strength threshold.\n");
      fprintf(stderr,"      writedirim: Optional argument to output ");
      fprintf(stderr,"a floating point");
      fprintf(stderr," direction image.\n\n");
      exit(1);
   }

   infilename = argv[1];
   sigma = atof(argv[2]);
   tlow = atof(argv[3]);
   thigh = atof(argv[4]);
   /* A fifth user argument (argc == 6) requests the gradient direction dump. */
   if(argc == 6) dirfilename = infilename;
   else dirfilename = NULL;

   /****************************************************************************
   * Read in the image. This read function allocates memory for the image.
   ****************************************************************************/
   if(VERBOSE) printf("Reading the image %s.\n", infilename);
   if(read_pgm_image(infilename, &image, &rows, &cols) == 0){
      fprintf(stderr, "Error reading the input image, %s.\n", infilename);
      exit(1);
   }

   /****************************************************************************
   * Perform the edge detection. All of the work takes place here.
   ****************************************************************************/
   if(VERBOSE) printf("Starting Canny edge detection.\n");
   if(dirfilename != NULL){
      /* snprintf guards against overflowing composedfname with a long name. */
      snprintf(composedfname, sizeof(composedfname),
         "%s_s_%3.2f_l_%3.2f_h_%3.2f.fim", infilename, sigma, tlow, thigh);
      dirfilename = composedfname;
   }
   canny(image, rows, cols, sigma, tlow, thigh, &edge, dirfilename);

   /****************************************************************************
   * Write out the edge image to a file.
   ****************************************************************************/
   snprintf(outfilename, sizeof(outfilename),
      "%s_s_%3.2f_l_%3.2f_h_%3.2f.pgm", infilename, sigma, tlow, thigh);
   if(VERBOSE) printf("Writing the edge image in the file %s.\n", outfilename);
   if(write_pgm_image(outfilename, edge, rows, cols, "", 255) == 0){
      fprintf(stderr, "Error writing the edge image, %s.\n", outfilename);
      exit(1);
   }
   free(image);
   free(edge);
   return 0;
}
/*******************************************************************************
* PROCEDURE: canny
* PURPOSE: To perform canny edge detection.
* NAME: Mike Heath
* DATE: 2/15/96
*******************************************************************************/
/*
 * canny(): run the full Canny pipeline on a rows x cols 8-bit image:
 * gaussian smoothing, x/y derivatives, (optional) gradient-direction dump,
 * gradient magnitude, non-maximal suppression, hysteresis thresholding.
 * On return *edge is a calloc'ed rows*cols edge map owned by the caller.
 * If fname is non-NULL the gradient direction image is written there as
 * raw floats.  All intermediate buffers are freed before returning.
 */
void canny(unsigned char *image, int rows, int cols, float sigma,
         float tlow, float thigh, unsigned char **edge, char *fname)
{
   FILE *fpdir=NULL;        /* File to write the gradient image to.     */
   unsigned char *nms;      /* Points that are local maximal magnitude. */
   short int *smoothedim,   /* The image after gaussian smoothing.      */
             *delta_x,      /* The first derivative image, x-direction. */
             *delta_y,      /* The first derivative image, y-direction. */
             *magnitude;    /* The magnitude of the gradient image.     */
   float *dir_radians=NULL; /* Gradient direction image.                */

   /****************************************************************************
   * Perform gaussian smoothing on the image using the input standard
   * deviation.
   ****************************************************************************/
   if(VERBOSE) printf("Smoothing the image using a gaussian kernel.\n");
   gaussian_smooth(image, rows, cols, sigma, &smoothedim);

   /****************************************************************************
   * Compute the first derivative in the x and y directions.
   ****************************************************************************/
   if(VERBOSE) printf("Computing the X and Y first derivatives.\n");
   derrivative_x_y(smoothedim, rows, cols, &delta_x, &delta_y);

   /****************************************************************************
   * This option to write out the direction of the edge gradient was added
   * to make the information available for computing an edge quality figure
   * of merit.
   ****************************************************************************/
   if(fname != NULL){
      /*************************************************************************
      * Compute the direction up the gradient, in radians that are
      * specified counterclockwise from the positive x-axis.
      *************************************************************************/
      radian_direction(delta_x, delta_y, rows, cols, &dir_radians, -1, -1);

      /*************************************************************************
      * Write the gradient direction image out to a file.
      *************************************************************************/
      if((fpdir = fopen(fname, "wb")) == NULL){
         fprintf(stderr, "Error opening the file %s for writing.\n", fname);
         exit(1);
      }
      /* Check the write actually completed; a short write would silently
         truncate the direction image. */
      if(fwrite(dir_radians, sizeof(float), rows*cols, fpdir)
         != (size_t)(rows*cols)){
         fprintf(stderr, "Error writing the file %s.\n", fname);
         exit(1);
      }
      fclose(fpdir);
      free(dir_radians);
   }

   /****************************************************************************
   * Compute the magnitude of the gradient.
   ****************************************************************************/
   if(VERBOSE) printf("Computing the magnitude of the gradient.\n");
   magnitude_x_y(delta_x, delta_y, rows, cols, &magnitude);

   /****************************************************************************
   * Perform non-maximal suppression.
   ****************************************************************************/
   if(VERBOSE) printf("Doing the non-maximal suppression.\n");
   if((nms = (unsigned char *) calloc(rows*cols,sizeof(unsigned char)))==NULL){
      fprintf(stderr, "Error allocating the nms image.\n");
      exit(1);
   }

   non_max_supp(magnitude, delta_x, delta_y, rows, cols, nms);

   /****************************************************************************
   * Use hysteresis to mark the edge pixels.
   ****************************************************************************/
   if(VERBOSE) printf("Doing hysteresis thresholding.\n");
   if((*edge=(unsigned char *)calloc(rows*cols,sizeof(unsigned char))) ==NULL){
      fprintf(stderr, "Error allocating the edge image.\n");
      exit(1);
   }

   apply_hysteresis(magnitude, nms, rows, cols, tlow, thigh, *edge);

   /****************************************************************************
   * Free all of the memory that we allocated except for the edge image that
   * is still being used to store out result.
   ****************************************************************************/
   free(smoothedim);
   free(delta_x);
   free(delta_y);
   free(magnitude);
   free(nms);
}
/*******************************************************************************
* Procedure: radian_direction
* Purpose: To compute a direction of the gradient image from component dx and
* dy images. Because not all derivatives are computed in the same way, this
* code allows for dx or dy to have been calculated in different ways.
*
* FOR X: xdirtag = -1 for [-1 0 1]
* xdirtag = 1 for [ 1 0 -1]
*
* FOR Y: ydirtag = -1 for [-1 0 1]'
* ydirtag = 1 for [ 1 0 -1]'
*
* The resulting angle is in radians measured counterclockwise from the
* xdirection. The angle points "up the gradient".
*******************************************************************************/
/*
 * Compute a gradient-direction image (radians, counterclockwise from the
 * x-axis, pointing "up the gradient") from the dx and dy component images.
 * xdirtag/ydirtag select the sign convention of the derivative filters:
 * pass -1 for a [-1 0 1] filter and 1 for [1 0 -1] (x), and likewise for y.
 * The result buffer is calloc'ed here and returned through *dir_radians.
 */
void radian_direction(short int *delta_x, short int *delta_y, int rows,
    int cols, float **dir_radians, int xdirtag, int ydirtag)
{
   float *dirim=NULL;
   int pos, npixels;
   double dx, dy;

   /****************************************************************************
   * Allocate an image to store the direction of the gradient.
   ****************************************************************************/
   if((dirim = (float *) calloc(rows*cols, sizeof(float))) == NULL){
      fprintf(stderr, "Error allocating the gradient direction image.\n");
      exit(1);
   }
   *dir_radians = dirim;

   npixels = rows * cols;
   for(pos = 0; pos < npixels; pos++){
      dx = (double)delta_x[pos];
      dy = (double)delta_y[pos];

      /* Flip signs to normalize both components to the [-1 0 1] convention. */
      if(xdirtag == 1) dx = -dx;
      if(ydirtag == -1) dy = -dy;

      dirim[pos] = (float)angle_radians(dx, dy);
   }
}
/*******************************************************************************
* FUNCTION: angle_radians
* PURPOSE: This procedure computes the angle of a vector with components x and
* y. It returns this angle in radians with the answer being in the range
* 0 <= angle <2*PI.
*******************************************************************************/
/*
 * Return the angle of the vector (x, y) in radians, in [0, 2*PI).
 * The zero vector maps to 0.  The angle is built from the first-quadrant
 * reference angle atan(|y|/|x|), reflected into the correct quadrant.
 */
double angle_radians(double x, double y)
{
   double ax, ay, base;

   ax = fabs(x);
   ay = fabs(y);

   if((ax == 0.0) && (ay == 0.0)) return(0.0);

   base = atan(ay/ax);      /* reference angle in the first quadrant */

   if(x >= 0.0)
      return((y >= 0.0) ? base : (2.0*M_PI - base));      /* quadrants I, IV  */
   return((y >= 0.0) ? (M_PI - base) : (M_PI + base));    /* quadrants II, III */
}
/*******************************************************************************
* PROCEDURE: magnitude_x_y
* PURPOSE: Compute the magnitude of the gradient. This is the square root of
* the sum of the squared derivative values.
* NAME: Mike Heath
* DATE: 2/15/96
*******************************************************************************/
void magnitude_x_y(short int *delta_x, short int *delta_y, int rows, int cols,
short int **magnitude)
{
int r, c, pos, sq1, sq2;
/****************************************************************************
* Allocate an image to store the magnitude of the gradient.
****************************************************************************/
if((*magnitude = (short *) calloc(rows*cols, sizeof(short))) == NULL){
fprintf(stderr, "Error allocating the magnitude image.\n");
exit(1);
}
for(r=0,pos=0;r<rows;r++){
for(c=0;c<cols;c++,pos++){
sq1 = (int)delta_x[pos] * (int)delta_x[pos];
sq2 = (int)delta_y[pos] * (int)delta_y[pos];
(*magnitude)[pos] = (short)(0.5 + sqrt((float)sq1 + (float)sq2));
}
}
}
/*******************************************************************************
* PROCEDURE: derrivative_x_y
* PURPOSE: Compute the first derivative of the image in both the x any y
* directions. The differential filters that are used are:
*
* -1
* dx = -1 0 +1 and dy = 0
* +1
*
* NAME: Mike Heath
* DATE: 2/15/96
*******************************************************************************/
/*
 * Compute first derivatives of the smoothed image with [-1 0 1] filters in
 * x and y; borders use one-sided differences so no pixels are lost.
 * Output buffers are calloc'ed here and returned through *delta_x/*delta_y;
 * the caller owns and must free them.  Assumes rows >= 2 and cols >= 2.
 * Fix: the delta_y allocation-failure message previously said "delta_x".
 */
void derrivative_x_y(short int *smoothedim, int rows, int cols,
        short int **delta_x, short int **delta_y)
{
   int r, c, pos;

   /****************************************************************************
   * Allocate images to store the derivatives.
   ****************************************************************************/
   if(((*delta_x) = (short *) calloc(rows*cols, sizeof(short))) == NULL){
      fprintf(stderr, "Error allocating the delta_x image.\n");
      exit(1);
   }
   if(((*delta_y) = (short *) calloc(rows*cols, sizeof(short))) == NULL){
      fprintf(stderr, "Error allocating the delta_y image.\n");
      exit(1);
   }

   /****************************************************************************
   * Compute the x-derivative. Adjust the derivative at the borders to avoid
   * losing pixels.
   ****************************************************************************/
#if VERBOSE
   printf("   Computing the X-direction derivative.\n");
#endif
   for(r=0;r<rows;r++){
      pos = r * cols;
      /* left border: forward difference */
      (*delta_x)[pos] = smoothedim[pos+1] - smoothedim[pos];
      pos++;
      for(c=1;c<(cols-1);c++,pos++){
         (*delta_x)[pos] = smoothedim[pos+1] - smoothedim[pos-1];
      }
      /* right border: backward difference */
      (*delta_x)[pos] = smoothedim[pos] - smoothedim[pos-1];
   }

   /****************************************************************************
   * Compute the y-derivative. Adjust the derivative at the borders to avoid
   * losing pixels.
   ****************************************************************************/
#if VERBOSE
   printf("   Computing the Y-direction derivative.\n");
#endif
   for(c=0;c<cols;c++){
      pos = c;
      /* top border: forward difference */
      (*delta_y)[pos] = smoothedim[pos+cols] - smoothedim[pos];
      pos += cols;
      for(r=1;r<(rows-1);r++,pos+=cols){
         (*delta_y)[pos] = smoothedim[pos+cols] - smoothedim[pos-cols];
      }
      /* bottom border: backward difference */
      (*delta_y)[pos] = smoothedim[pos] - smoothedim[pos-cols];
   }
}
/*******************************************************************************
* PROCEDURE: gaussian_smooth
* PURPOSE: Blur an image with a gaussian filter.
* NAME: Mike Heath
* DATE: 2/15/96
*******************************************************************************/
/*
 * Blur the image with a separable gaussian filter (x pass into a float
 * temp buffer, then y pass into the short output, scaled by
 * BOOSTBLURFACTOR and rounded).  *smoothedim is calloc'ed here and owned
 * by the caller.
 *
 * OpenMP fix: dot and sum are per-pixel scratch accumulators, so they must
 * be `private`, not `reduction(+:...)`.  The old reduction clause happened
 * to work only because reductions also privatize the variables; the
 * cross-thread combined value it produced was meaningless.
 */
void gaussian_smooth(unsigned char *image, int rows, int cols, float sigma,
        short int **smoothedim)
{
   int r, c, rr, cc,     /* Counter variables. */
      windowsize,        /* Dimension of the gaussian kernel. */
      center;            /* Half of the windowsize. */
   float *tempim,        /* Buffer for separable filter gaussian smoothing. */
         *kernel,        /* A one dimensional gaussian kernel. */
         dot,            /* Dot product summing variable. */
         sum;            /* Sum of the kernel weights variable. */

   /****************************************************************************
   * Create a 1-dimensional gaussian smoothing kernel.
   ****************************************************************************/
   if(VERBOSE) printf("   Computing the gaussian smoothing kernel.\n");
   make_gaussian_kernel(sigma, &kernel, &windowsize);
   center = windowsize / 2;

   /****************************************************************************
   * Allocate a temporary buffer image and the smoothed image.
   ****************************************************************************/
   if((tempim = (float *) calloc(rows*cols, sizeof(float))) == NULL){
      fprintf(stderr, "Error allocating the buffer image.\n");
      exit(1);
   }
   if(((*smoothedim) = (short int *) calloc(rows*cols,
         sizeof(short int))) == NULL){
      fprintf(stderr, "Error allocating the smoothed image.\n");
      exit(1);
   }

   /****************************************************************************
   * Blur in the x - direction.  Pixels are independent, so the collapsed
   * loop nest parallelizes cleanly; sum renormalizes at the image borders
   * where the kernel is clipped.
   ****************************************************************************/
   if(VERBOSE) printf("   Bluring the image in the X-direction.\n");
#pragma omp parallel for collapse(2) private(cc,dot,sum)
   for(r=0;r<rows;r++){
      for(c=0;c<cols;c++){
         dot = 0.0;
         sum = 0.0;
         for(cc=(-center);cc<=center;cc++){
            if(((c+cc) >= 0) && ((c+cc) < cols)){
               dot += (float)image[r*cols+(c+cc)] * kernel[center+cc];
               sum += kernel[center+cc];
            }
         }
         tempim[r*cols+c] = dot/sum;
      }
   }

   /****************************************************************************
   * Blur in the y - direction.
   ****************************************************************************/
   if(VERBOSE) printf("   Bluring the image in the Y-direction.\n");
#pragma omp parallel for collapse(2) private(rr,dot,sum)
   for(c=0;c<cols;c++){
      for(r=0;r<rows;r++){
         sum = 0.0;
         dot = 0.0;
         for(rr=(-center);rr<=center;rr++){
            if(((r+rr) >= 0) && ((r+rr) < rows)){
               dot += tempim[(r+rr)*cols+c] * kernel[center+rr];
               sum += kernel[center+rr];
            }
         }
         (*smoothedim)[r*cols+c] = (short int)(dot*BOOSTBLURFACTOR/sum + 0.5);
      }
   }

   free(tempim);
   free(kernel);
}
/*******************************************************************************
* PROCEDURE: make_gaussian_kernel
* PURPOSE: Create a one dimensional gaussian kernel.
* NAME: Mike Heath
* DATE: 2/15/96
*******************************************************************************/
void make_gaussian_kernel(float sigma, float **kernel, int *windowsize)
{
int i, center;
float x, fx, sum=0.0;
*windowsize = 1 + 2 * ceil(2.5 * sigma);
center = (*windowsize) / 2;
if(VERBOSE) printf(" The kernel has %d elements.\n", *windowsize);
if((*kernel = (float *) calloc((*windowsize), sizeof(float))) == NULL){
fprintf(stderr, "Error callocing the gaussian kernel array.\n");
exit(1);
}
for(i=0;i<(*windowsize);i++){
x = (float)(i - center);
fx = pow(2.71828, -0.5*x*x/(sigma*sigma)) / (sigma * sqrt(6.2831853));
(*kernel)[i] = fx;
sum += fx;
}
for(i=0;i<(*windowsize);i++) (*kernel)[i] /= sum;
if(VERBOSE){
printf("The filter coefficients are:\n");
for(i=0;i<(*windowsize);i++)
printf("kernel[%d] = %f\n", i, (*kernel)[i]);
}
}
|
regularisation.h | /* Incremental diffusion regularisation of parametrised transformation
using (globally optimal) belief-propagation on minimum spanning tree.
Fast distance transform uses squared differences.
Similarity cost for each node and label has to be given as input.
*/
// messageDT(): in-place 3-D min-convolution (distance-transform style message
// passing) over one node's len1^3 cost volume, as used in MST belief
// propagation.  The volume for node `ind` lives at data[ind*len3 .. +len3-1];
// on return it holds, for every displacement, the minimum over all source
// displacements of (cost + squared offset distance), and indout[ind*len3+..]
// holds the flat index of the minimizing source displacement.  The three
// passes sweep the y, x and z axes in turn (offsety/offsetx/offsetz shift the
// parabola centers).  NOTE(review): this is a brute-force O(len1) inner min
// per output cell, not the two-pointer lower-envelope transform.
void messageDT(int ind,float* data,short* indout,int len1,float offsetx,float offsety,float offsetz){
//int ind1=get_global_id(0)+start;
// int ind=ordered[ind1];
// Strides within one cost volume: len2 = plane stride, len3 = volume size.
int len2=len1*len1;
int len3=len1*len1*len1;
// z[] caches the squared-distance penalty table; VLAs below hold the two
// ping-pong cost buffers and their argmin index buffers.
float z[len1*2+1];
float* val;
float* valout;
short* indo;
float* valb;
float* valb2;
float buffer[len3];
float buffer2[len3];
int* indb;
int* indb2;
int bufferi[len3];
int bufferi2[len3];
// Pass 1: sweep along y (innermost, stride 1).  Penalty table uses offsety.
for(int i=0;i<len1*2+1;i++){
z[i]=(i-len1+offsety)*(i-len1+offsety);
}
for(int k1=0;k1<len1;k1++){
for(int j1=0;j1<len1;j1++){
//valb=buffer2+(j1*len1+k1*len1*len1);//
// val: source row in data; valb2/indb: destination row in buffer/bufferi.
val=data+ind*len3+(j1*len1+k1*len1*len1);
valb2=buffer+(j1*len1+k1*len1*len1);
indb=bufferi+(j1*len1+k1*len1*len1);
// num: flat offset of this row, folded into the stored argmin indices.
int num=(j1*len1+k1*len1*len1);
for(int i=0;i<len1;i++){
float minval=val[0]+z[i+len1];
int minind=0;
// Branchless min-with-argmin over all len1 source positions j.
for(int j=0;j<len1;j++){
bool b=(val[j]+z[i-j+len1]<minval);
minval=b?val[j]+z[i-j+len1]:minval;
minind=b?j:minind;
}
valb2[i]=minval;
indb[i]=minind+num;
}
}
}
// Pass 2: sweep along x (stride len1), buffer -> buffer2.
// NOTE(review): this fill runs to len1*2 while the first runs to len1*2+1;
// indices used below stay within 1..2*len1-1, so z[2*len1] is never read,
// but the inconsistency is worth confirming upstream.
for(int i=0;i<len1*2;i++){
z[i]=(i-len1+offsetx)*(i-len1+offsetx);
}
for(int k1=0;k1<len1;k1++){
for(int i1=0;i1<len1;i1++){
valb=buffer+(i1+k1*len1*len1);
valb2=buffer2+(i1+k1*len1*len1);
indb=bufferi+(i1+k1*len1*len1);
indb2=bufferi2+(i1+k1*len1*len1);
for(int i=0;i<len1;i++){
float minval=valb[0]+z[i+len1];
int minind=0;
for(int j=0;j<len1;j++){
bool b=(valb[j*len1]+z[i-j+len1]<minval);
minval=b?valb[j*len1]+z[i-j+len1]:minval;
minind=b?j:minind;
}
valb2[i*len1]=minval;
// Chain argmins: propagate the pass-1 index of the chosen source.
indb2[i*len1]=indb[minind*len1];
}
}
}
// Pass 3: sweep along z (stride len2), buffer2 -> data/indout (final result).
for(int i=0;i<len1*2;i++){
z[i]=(i-len1+offsetz)*(i-len1+offsetz);
}
for(int j1=0;j1<len1;j1++){
for(int i1=0;i1<len1;i1++){
valb=buffer2+(i1+j1*len1);
//valb2=buffer+(i1+j1*len1);
valout=data+ind*len3+(i1+j1*len1);
indb=bufferi2+(i1+j1*len1);
//indb2=bufferi+(i1+j1*len1);
indo=indout+ind*len3+(i1+j1*len1);
for(int i=0;i<len1;i++){
float minval=valb[0]+z[i+len1];
int minind=0;
for(int j=0;j<len1;j++){
bool b=(valb[j*len2]+z[i-j+len1]<minval);
minval=b?valb[j*len2]+z[i-j+len1]:minval;
minind=b?j:minind;
}
valout[i*len2]=minval;
indo[i*len2]=indb[minind*len2];
}
}
}
}
/*
 Incremental diffusion regularisation of a parametrised transformation using
 (globally optimal) belief propagation on a minimum spanning tree.  A
 bottom-up pass accumulates messages (via messageDT) from leaves to root,
 then a top-down pass selects the best displacement per node and writes the
 updated deformation field into u1/v1/w1.
 Depends on globals image_m/image_n/image_o declared elsewhere.

 Fixes over the previous revision: all `new[]` allocations are released with
 `delete[]` (plain `delete` on an array is undefined behaviour); the leaked
 numlev/startlev/xs/ys/zs buffers are now freed; unused allocations and
 counters (vals, inds, processed, frac, counti, ...) are removed.
*/
void regularisationCL(float* costall,float* u0,float* v0,float* w0,float* u1,float* v1,float* w1,int hw,int step1,float quant,int* ordered,int* parents,float* edgemst)
{
    int m2=image_m;
    int n2=image_n;
    int o2=image_o;
    int m=m2/step1;
    int n=n2/step1;
    int o=o2/step1;
    timeval time1,time2;
    int sz=m*n*o;           // number of control-point nodes
    int len=hw*2+1;         // displacements per axis
    int len1=len;
    int len2=len*len*len;   // displacements per node
    int len3=len*len*len;
    gettimeofday(&time1, NULL);
    short *allinds=new short[sz*len2];
    float *cost1=new float[len2];
    // Calculate level boundaries (tree depth per node) so that all nodes of
    // one level can be processed in parallel.
    int* levels=new int[sz];
    for(int i=0;i<sz;i++){
        levels[i]=0;
    }
    for(int i=1;i<sz;i++){
        int ochild=ordered[i];
        int oparent=parents[ordered[i]];
        levels[ochild]=levels[oparent]+1;
    }
    int maxlev=1+*max_element(levels,levels+sz);
    int* numlev=new int[maxlev];
    int* startlev=new int[maxlev];
    for(int i=0;i<maxlev;i++){
        numlev[i]=0;
    }
    for(int i=0;i<sz;i++){
        numlev[levels[i]]++;
    }
    startlev[0]=numlev[0];
    for(int i=1;i<maxlev;i++){ // cumulative sum
        startlev[i]=startlev[i-1]+numlev[i];
    }
    delete[] levels;
    for(int i=0;i<len2;i++){
        cost1[i]=0;
    }
    float timeCopy=0;
    float timeMessage=0;
    // Bottom-up pass: weight each node's costs by its MST edge weight, run
    // the distance-transform message, then add the (min-normalised) child
    // messages into the parents.  Levels are processed deepest first.
    for(int lev=maxlev-1;lev>0;lev--){
        int start=startlev[lev-1];
        int length=numlev[lev];
        gettimeofday(&time1, NULL);
        for(int i=start;i<start+length;i++){
            int ochild=ordered[i];
            for(int l=0;l<len2;l++){
                costall[ochild*len2+l]*=edgemst[ochild];
            }
        }
#pragma omp parallel for
        for(int i=start;i<start+length;i++){
            int ochild=ordered[i];
            int oparent=parents[ordered[i]];
            float offsetx=(u0[oparent]-u0[ochild])/(float)quant;
            float offsety=(v0[oparent]-v0[ochild])/(float)quant;
            float offsetz=(w0[oparent]-w0[ochild])/(float)quant;
            messageDT(ochild,costall,allinds,len1,offsetx,offsety,offsetz);
        }
        gettimeofday(&time2, NULL);
        timeMessage+=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6);
        gettimeofday(&time1, NULL);
        // Serial accumulation into parents: several children may share a
        // parent, so this += must not run concurrently.
        for(int i=start;i<start+length;i++){
            int ochild=ordered[i];
            int oparent=parents[ordered[i]];
            float minval=*min_element(costall+ochild*len2,costall+ochild*len2+len3);
            for(int l=0;l<len2;l++){
                costall[oparent*len2+l]+=(costall[ochild*len2+l]-minval); //transp
            }
        }
        gettimeofday(&time2, NULL);
        timeCopy+=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6);
    }
    // Dense displacement space: physical offsets (in quant units) for each
    // of the len^3 displacement labels.
    float* xs=new float[len*len*len];
    float* ys=new float[len*len*len];
    float* zs=new float[len*len*len];
    for(int i=0;i<len;i++){
        for(int j=0;j<len;j++){
            for(int k=0;k<len;k++){
                xs[i+j*len+k*len*len]=(j-hw)*quant;
                ys[i+j*len+k*len*len]=(i-hw)*quant;
                zs[i+j*len+k*len*len]=(k-hw)*quant;
            }
        }
    }
    int *selected=new int[sz];
    // MST cost & select displacement for the root node.
    int i=0;
    int oroot=ordered[i];
    for(int l=0;l<len2;l++){
        cost1[l]=costall[oroot*len2+l]; //transp
    }
    float value=cost1[0]; int index=0;
    for(int l=0;l<len2;l++){
        if(cost1[l]<value){
            value=cost1[l];
            index=l;
        }
        allinds[oroot*len2+l]=l; //transp
    }
    selected[oroot]=index;
    u1[oroot]=xs[index]+u0[oroot];
    v1[oroot]=ys[index]+v0[oroot];
    w1[oroot]=zs[index]+w0[oroot];
    // Top-down pass: each node's label is the argmin recorded for its
    // parent's selected label; add the offset to the previous field.
    for(int i=1;i<sz;i++){
        int ochild=ordered[i];
        int oparent=parents[ordered[i]];
        index=allinds[ochild*len2+selected[oparent]]; //transp
        selected[ochild]=index;
        u1[ochild]=xs[index]+u0[ochild];
        v1[ochild]=ys[index]+v0[ochild];
        w1[ochild]=zs[index]+w0[ochild];
    }
    delete[] cost1;
    delete[] allinds;
    delete[] selected;
    delete[] numlev;
    delete[] startlev;
    delete[] xs;
    delete[] ys;
    delete[] zs;
}
|
is.c | /*************************************************************************
* *
* N A S P A R A L L E L B E N C H M A R K S 3.3 *
* *
* O p e n M P V E R S I O N *
* *
* I S *
* *
*************************************************************************
* *
* This benchmark is an OpenMP version of the NPB IS code. *
* It is described in NAS Technical Report 99-011. *
* *
* Permission to use, copy, distribute and modify this software *
* for any purpose with or without fee is hereby granted. We *
* request, however, that all derived work reference the NAS *
* Parallel Benchmarks 3.3. This software is provided "as is" *
* without express or implied warranty. *
* *
* Information on NPB 3.3, including the technical report, the *
* original specifications, source code, results and information *
* on how to submit new results, is available at: *
* *
* http://www.nas.nasa.gov/Software/NPB/ *
* *
* Send comments or suggestions to npb@nas.nasa.gov *
* *
* NAS Parallel Benchmarks Group *
* NASA Ames Research Center *
* Mail Stop: T27A-1 *
* Moffett Field, CA 94035-1000 *
* *
* E-mail: npb@nas.nasa.gov *
* Fax: (650) 604-3957 *
* *
*************************************************************************
* *
* Author: M. Yarrow *
* H. Jin *
* *
*************************************************************************/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "../my_include/my_include.h"
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* To disable the use of buckets, comment out the following line */
#define USE_BUCKETS
/* Uncomment below for cyclic schedule */
/*#define SCHED_CYCLIC*/
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS D */
/*************/
#if CLASS == 'D'
#define TOTAL_KEYS_LOG_2 31
#define MAX_KEY_LOG_2 27
#define NUM_BUCKETS_LOG_2 10
#endif
#if CLASS == 'D'
#define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2)
#else
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#endif
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
#if CLASS == 'D'
typedef long INT_TYPE;
#else
typedef int INT_TYPE;
#endif
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification; /* running count of successful partial/full verifications */
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
/* key_array          : the keys being ranked (perturbed each iteration in rank())
   key_buff1          : key-population / prefix-sum rank counts (see rank())
   key_buff2          : scratch copy of the keys, bucket-ordered when USE_BUCKETS
   partial_verify_vals: keys sampled at test_index_array positions for partial verify
   key_buff1_aptr     : per-thread count arrays, used only in the no-bucket path */
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[MAX_KEY],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE],
**key_buff1_aptr = NULL;
#ifdef USE_BUCKETS
/* bucket_size : one bucket-count array per thread (allocated in alloc_key_buff)
   bucket_ptrs : per-thread bucket write pointers, hence the threadprivate below */
INT_TYPE **bucket_size,
bucket_ptrs[NUM_BUCKETS];
#pragma omp threadprivate(bucket_ptrs)
#endif
/**********************/
/* Partial verif info */
/**********************/
/* Active verification tables for the compiled-in CLASS (copied in main()),
   followed by the reference index/rank tables for every problem class. */
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427,17148,23627,62548,4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0,18,346,64917,65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773,934767,875723,898999,404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249,11698,1039987,1043896,1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377,662041,5336171,3642833,4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104,17523,123928,8288932,8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869,812306,5102857,18232239,26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937,10244,59149,33135281,99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927,72999161,74326391,129606274,21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147,882988,266290,133997595,133525895},
D_test_index_array[TEST_ARRAY_SIZE] =
{1317351170,995930646,1157283250,1503301535,1453734525},
D_test_rank_array[TEST_ARRAY_SIZE] =
{1,36538729,1978098519,2145192618,2147425337};
/***********************/
/* function prototypes */
/***********************/
/* Prototypes for routines defined below in this file. */
double randlc( double *X, double *A );
void full_verify( void );
/* NOTE(review): c_print_results and the timer_* routines are declared here
   but not defined in this file; they are linked in from elsewhere. */
void c_print_results( char *name,
char class,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/* Thread-private generator state: KS flags one-time initialization of the
   exact powers of two R23=2^-23, R46=2^-46, T23=2^23, T46=2^46. */
static int KS=0;
static double R23, R46, T23, T46;
#pragma omp threadprivate(KS, R23, R46, T23, T46)
/* Linear congruential generator x_{k+1} = a * x_k (mod 2^46), carried out
 * exactly in double precision by splitting operands into 23-bit halves.
 * X: in/out seed (odd integer-valued double in (1, 2^46)); updated to x_1.
 * A: multiplier 'a' (same constraints as X).
 * Returns the new value normalized to (0,1), i.e. 2^-46 * x_1.
 * NOTE: the double->int truncations and the exact statement order below ARE
 * the algorithm (46-bit modular arithmetic); do not reorder or simplify. */
double randlc( double *X, double *A )
{
double T1, T2, T3, T4;
double A1;
double A2;
double X1;
double X2;
double Z;
int i, j;
/* One-time (per thread) computation of 2^+/-23 and 2^+/-46 by repeated
   multiplication so the constants are bit-exact on any conforming machine. */
if (KS == 0)
{
R23 = 1.0;
R46 = 1.0;
T23 = 1.0;
T46 = 1.0;
for (i=1; i<=23; i++)
{
R23 = 0.50 * R23;
T23 = 2.0 * T23;
}
for (i=1; i<=46; i++)
{
R46 = 0.50 * R46;
T46 = 2.0 * T46;
}
KS = 1;
}
/* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */
T1 = R23 * *A;
j = T1; /* truncation to int extracts the high 23 bits of A */
A1 = j;
A2 = *A - T23 * A1;
/* Break X into two parts such that X = 2^23 * X1 + X2, compute
Z = A1 * X2 + A2 * X1 (mod 2^23), and then
X = 2^23 * Z + A2 * X2 (mod 2^46). */
T1 = R23 * *X;
j = T1;
X1 = j;
X2 = *X - T23 * X1;
T1 = A1 * X2 + A2 * X1;
j = R23 * T1; /* truncation implements the (mod 2^23) reduction */
T2 = j;
Z = T1 - T23 * T2;
T3 = T23 * Z + A2 * X2;
j = R46 * T3; /* truncation implements the (mod 2^46) reduction */
T4 = j;
*X = T3 - T46 * T4;
return(R46 * *X);
}
/*****************************************************************/
/************ F I N D _ M Y _ S E E D ************/
/************ ************/
/************ returns parallel random number seq seed ************/
/*****************************************************************/
/*
* Create a random number sequence of total length nn residing
* on np number of processors. Each processor will therefore have a
* subsequence of length nn/np. This routine returns that random
* number which is the first random number for the subsequence belonging
* to processor rank kn, and which is used as seed for proc kn ran # gen.
*/
/* Skip-ahead for randlc: returns the seed of the first random number in the
 * subsequence owned by processor kn, advancing t1 by nq = 4*ceil(nn/4/np)*kn
 * steps in O(log nq) randlc calls.  NOTE: each randlc call mutates its seed
 * argument, so the call order inside the loop is the algorithm itself. */
double find_my_seed( int kn, /* my processor rank, 0<=kn<=num procs */
int np, /* np = num procs */
long nn, /* total num of ran numbers, all procs */
double s, /* Ran num seed, for ex.: 314159265.00 */
double a ) /* Ran num gen mult, try 1220703125.00 */
{
double t1,t2;
long mq,nq,kk,ik;
if ( kn == 0 ) return s; /* rank 0 starts at the original seed */
mq = (nn/4 + np - 1) / np;
nq = mq * 4 * kn; /* number of rans to be skipped */
t1 = s;
t2 = a;
kk = nq;
/* Binary skip: when kk is even, square the effective multiplier t2
   (randlc(&t2,&t2) computes t2 = t2*t2 mod 2^46) and halve kk;
   when odd, advance the seed t1 one composite step and decrement kk. */
while ( kk > 1 ) {
ik = kk / 2;
if( 2 * ik == kk ) {
(void)randlc( &t2, &t2 );
kk = ik;
}
else {
(void)randlc( &t1, &t2 );
kk = kk - 1;
}
}
(void)randlc( &t1, &t2 ); /* final step for the remaining kk == 1 */
return( t1 );
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/* Fill key_array with NUM_KEYS pseudorandom keys in [0, MAX_KEY).
 * Each OpenMP thread generates its own contiguous slice [k1,k2) using a
 * skip-ahead seed from find_my_seed, so the sequence is identical regardless
 * of the thread count.  Each key is the average of 4 randlc draws scaled by
 * MAX_KEY/4, giving an approximately Gaussian-free uniform-sum distribution. */
void create_seq( double seed, double a )
{
double x, s;
INT_TYPE i, k;
#pragma omp parallel private(x,s,i,k)
{
INT_TYPE k1, k2;
double an = a;
int myid, num_procs;
INT_TYPE mq;
#ifdef _OPENMP
myid = omp_get_thread_num();
num_procs = omp_get_num_threads();
#else
myid = 0;
num_procs = 1;
#endif
/* Slice the key range evenly (ceiling division) across threads. */
mq = (NUM_KEYS + num_procs - 1) / num_procs;
k1 = mq * myid;
k2 = k1 + mq;
if ( k2 > NUM_KEYS ) k2 = NUM_KEYS;
/* Reset this thread's randlc state before seeding (KS is threadprivate). */
KS = 0;
s = find_my_seed( myid, num_procs,
(long)4*NUM_KEYS, seed, an );
k = MAX_KEY/4;
for (i=k1; i<k2; i++)
{
/* 4 draws per key; truncation of k*x yields a key in [0, MAX_KEY). */
x = randlc(&s, &an);
x += randlc(&s, &an);
x += randlc(&s, &an);
x += randlc(&s, &an);
key_array[i] = k*x;
}
} /*omp parallel*/
}
/*****************************************************************/
/***************** Allocate Working Buffer ****************/
/*****************************************************************/
/* Allocate 'size' bytes from the heap.  The benchmark cannot proceed
   without its buffers, so an allocation failure prints a diagnostic via
   perror and terminates the program with status 1.  Consequently the
   returned pointer is always non-NULL. */
void *alloc_mem( size_t size )
{
    void *block = malloc(size);
    if (block == NULL) {
        perror("Memory allocation error");
        exit(1);
    }
    return block;
}
/* Allocate the per-thread working buffers used by rank().
   USE_BUCKETS path : one NUM_BUCKETS-long count array per thread, plus a
                      parallel zeroing pass over key_buff2.
   no-bucket path   : thread 0 ranks directly into key_buff1 while every
                      other thread gets a private MAX_KEY-long count array.
   All allocations go through alloc_mem, which aborts on failure. */
void alloc_key_buff( void )
{
    int num_procs;
    INT_TYPE t;
#ifdef _OPENMP
    num_procs = omp_get_max_threads();
#else
    num_procs = 1;
#endif
#ifdef USE_BUCKETS
    bucket_size = (INT_TYPE **)alloc_mem(num_procs * sizeof(INT_TYPE *));
    for (t = 0; t < num_procs; t++)
        bucket_size[t] = (INT_TYPE *)alloc_mem(NUM_BUCKETS * sizeof(INT_TYPE));
    /* Zero the bucketized-key scratch array in parallel. */
#pragma omp parallel for
    for (t = 0; t < NUM_KEYS; t++)
        key_buff2[t] = 0;
#else /*USE_BUCKETS*/
    key_buff1_aptr = (INT_TYPE **)alloc_mem(num_procs * sizeof(INT_TYPE *));
    key_buff1_aptr[0] = key_buff1;
    for (t = 1; t < num_procs; t++)
        key_buff1_aptr[t] = (INT_TYPE *)alloc_mem(MAX_KEY * sizeof(INT_TYPE));
#endif /*USE_BUCKETS*/
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/* Final sort + verification.  Uses the rank counts left in
 * key_buff_ptr_global by the last rank() call: decrementing the count for a
 * key value yields that key's final position, so keys are scattered into
 * key_array in sorted order.  Counts are consumed destructively.  On success
 * passed_verification is incremented; otherwise the out-of-order count is
 * printed. */
void full_verify( void )
{
INT_TYPE i, j;
INT_TYPE k, k1, k2;
/* Now, finally, sort the keys: */
/* Copy keys into work array; keys in key_array will be reassigned. */
#ifdef USE_BUCKETS
/* Buckets are already sorted. Sorting keys within each bucket */
/* Each bucket maps to a disjoint key range, so buckets can be placed in
   parallel without the per-key races the no-bucket path must partition for. */
#ifdef SCHED_CYCLIC
#pragma omp parallel for private(i,j,k,k1) schedule(static,1)
#else
#pragma omp parallel for private(i,j,k,k1) schedule(dynamic)
#endif
for( j=0; j< NUM_BUCKETS; j++ ) {
k1 = (j > 0)? bucket_ptrs[j-1] : 0;
for ( i = k1; i < bucket_ptrs[j]; i++ ) {
k = --key_buff_ptr_global[key_buff2[i]];
key_array[k] = key_buff2[i];
}
}
#else
#pragma omp parallel private(i,j,k,k1,k2)
{
#pragma omp for
for( i=0; i<NUM_KEYS; i++ )
key_buff2[i] = key_array[i];
/* This is actual sorting. Each thread is responsible for
a subset of key values */
/* Each thread handles key VALUES in [k1,k2) only, so no two threads ever
   decrement the same counter. */
j = omp_get_num_threads();
j = (MAX_KEY + j - 1) / j;
k1 = j * omp_get_thread_num();
k2 = k1 + j;
if (k2 > MAX_KEY) k2 = MAX_KEY;
for( i=0; i<NUM_KEYS; i++ ) {
if (key_buff2[i] >= k1 && key_buff2[i] < k2) {
k = --key_buff_ptr_global[key_buff2[i]];
key_array[k] = key_buff2[i];
}
}
} /*omp parallel*/
#endif
/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
j = 0;
#pragma omp parallel for reduction(+:j)
for( i=1; i<NUM_KEYS; i++ )
if( key_array[i-1] > key_array[i] )
j++;
if( j != 0 )
printf( "Full_verify: number of keys out of sort: %ld\n", (long)j );
else
passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/* One ranking iteration: counts the population of every key value and turns
 * the counts in key_buff1 (via key_buff_ptr) into ranks by prefix summation.
 * Two keys are perturbed per iteration so each iteration ranks slightly
 * different data, then a partial verification compares sampled ranks against
 * the class reference tables, shifting by +/- iteration per the class rules.
 * On the final iteration key_buff_ptr is published for full_verify(). */
void rank( int iteration )
{
INT_TYPE i, k;
INT_TYPE *key_buff_ptr, *key_buff_ptr2;
#ifdef USE_BUCKETS
int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
INT_TYPE num_bucket_keys = (1L << shift);
#endif
/* Iteration-dependent perturbation of two keys. */
key_array[iteration] = iteration;
key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;
/* Determine where the partial verify test keys are, load into */
/* top of array bucket_size */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
partial_verify_vals[i] = key_array[test_index_array[i]];
/* Setup pointers to key buffers */
#ifdef USE_BUCKETS
key_buff_ptr2 = key_buff2;
#else
key_buff_ptr2 = key_array;
#endif
key_buff_ptr = key_buff1;
#pragma omp parallel private(i, k)
{
INT_TYPE *work_buff, m, k1, k2;
int myid = 0, num_procs = 1;
#ifdef _OPENMP
myid = omp_get_thread_num();
num_procs = omp_get_num_threads();
#endif
/* Bucket sort is known to improve cache performance on some */
/* cache based systems. But the actual performance may depend */
/* on cache size, problem size. */
#ifdef USE_BUCKETS
work_buff = bucket_size[myid];
/* Initialize */
for( i=0; i<NUM_BUCKETS; i++ )
work_buff[i] = 0;
/* Determine the number of keys in each bucket */
/* (the static-scheduled omp-for barrier makes all threads' counts
   visible before the cross-thread reads of bucket_size below) */
#pragma omp for schedule(static)
for( i=0; i<NUM_KEYS; i++ )
work_buff[key_array[i] >> shift]++;
/* Accumulative bucket sizes are the bucket pointers.
These are global sizes accumulated upon to each bucket */
bucket_ptrs[0] = 0;
for( k=0; k< myid; k++ )
bucket_ptrs[0] += bucket_size[k][0];
for( i=1; i< NUM_BUCKETS; i++ ) {
bucket_ptrs[i] = bucket_ptrs[i-1];
for( k=0; k< myid; k++ )
bucket_ptrs[i] += bucket_size[k][i];
for( k=myid; k< num_procs; k++ )
bucket_ptrs[i] += bucket_size[k][i-1];
}
/* Sort into appropriate bucket */
#pragma omp for schedule(static)
for( i=0; i<NUM_KEYS; i++ )
{
k = key_array[i];
key_buff2[bucket_ptrs[k >> shift]++] = k;
}
/* The bucket pointers now point to the final accumulated sizes */
if (myid < num_procs-1) {
for( i=0; i< NUM_BUCKETS; i++ )
for( k=myid+1; k< num_procs; k++ )
bucket_ptrs[i] += bucket_size[k][i];
}
/* Now, buckets are sorted. We only need to sort keys inside
each bucket, which can be done in parallel. Because the distribution
of the number of keys in the buckets is Gaussian, the use of
a dynamic schedule should improve load balance, thus, performance */
#ifdef SCHED_CYCLIC
#pragma omp for schedule(static,1)
#else
#pragma omp for schedule(dynamic)
#endif
for( i=0; i< NUM_BUCKETS; i++ ) {
/* Clear the work array section associated with each bucket */
k1 = i * num_bucket_keys;
k2 = k1 + num_bucket_keys;
for ( k = k1; k < k2; k++ )
key_buff_ptr[k] = 0;
/* Ranking of all keys occurs in this section: */
/* In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population */
m = (i > 0)? bucket_ptrs[i-1] : 0;
for ( k = m; k < bucket_ptrs[i]; k++ )
key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */
/* population */
/* To obtain ranks of each key, successively add the individual key
population, not forgetting to add m, the total of lesser keys,
to the first key population */
key_buff_ptr[k1] += m;
for ( k = k1+1; k < k2; k++ )
key_buff_ptr[k] += key_buff_ptr[k-1];
}
#else /*USE_BUCKETS*/
work_buff = key_buff1_aptr[myid];
/* Clear the work array */
for( i=0; i<MAX_KEY; i++ )
work_buff[i] = 0;
/* Ranking of all keys occurs in this section: */
/* In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population */
#pragma omp for nowait schedule(static)
for( i=0; i<NUM_KEYS; i++ )
work_buff[key_buff_ptr2[i]]++; /* Now they have individual key */
/* population */
/* To obtain ranks of each key, successively add the individual key
population */
for( i=0; i<MAX_KEY-1; i++ )
work_buff[i+1] += work_buff[i];
#pragma omp barrier
/* Accumulate the global key population */
for( k=1; k<num_procs; k++ ) {
#pragma omp for nowait schedule(static)
for( i=0; i<MAX_KEY; i++ )
key_buff_ptr[i] += key_buff1_aptr[k][i];
}
#endif /*USE_BUCKETS*/
} /*omp parallel*/
/* This is the partial verify test section */
/* Observe that test_rank_array vals are */
/* shifted differently for different cases */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
{
k = partial_verify_vals[i]; /* test vals were put here */
if( 0 < k && k <= NUM_KEYS-1 )
{
/* rank of key k = cumulative population of all keys < k */
INT_TYPE key_rank = key_buff_ptr[k-1];
int failed = 0;
switch( CLASS )
{
case 'S':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'W':
if( i < 2 )
{
if( key_rank != test_rank_array[i]+(iteration-2) )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'A':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+(iteration-1) )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-(iteration-1) )
failed = 1;
else
passed_verification++;
}
break;
case 'B':
if( i == 1 || i == 2 || i == 4 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'C':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'D':
if( i < 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
}
if( failed == 1 )
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, (int)i );
}
}
/* Make copies of rank info for use by full_verify: these variables
in rank are local; making them global slows down the code, probably
since they cannot be made register by compiler */
if( iteration == MAX_ITERATIONS )
key_buff_ptr_global = key_buff_ptr;
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/* Benchmark driver: sets up verification tables for the compiled-in CLASS,
 * generates the key sequence, runs one untimed warm-up rank(), then times
 * MAX_ITERATIONS ranking iterations, runs full_verify(), and prints the
 * standard NPB results (timers 1-3 only if a "timer.flag" file exists).
 * NOTE(review): the EC/clflush/mfence calls in the main loop come from
 * ../my_include/my_include.h and appear to be crash/persistence
 * instrumentation injected into the benchmark -- confirm before removing. */
int main( int argc, char **argv )
{
int i, iteration, timer_on;
double timecounter;
FILE *fp;
/* Initialize timers */
timer_on = 0;
if ((fp = fopen("timer.flag", "r")) != NULL) {
fclose(fp);
timer_on = 1;
}
timer_clear( 0 );
if (timer_on) {
timer_clear( 1 );
timer_clear( 2 );
timer_clear( 3 );
}
if (timer_on) timer_start( 3 );
/* Initialize the verification arrays if a valid class */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
switch( CLASS )
{
case 'S':
test_index_array[i] = S_test_index_array[i];
test_rank_array[i] = S_test_rank_array[i];
break;
case 'A':
test_index_array[i] = A_test_index_array[i];
test_rank_array[i] = A_test_rank_array[i];
break;
case 'W':
test_index_array[i] = W_test_index_array[i];
test_rank_array[i] = W_test_rank_array[i];
break;
case 'B':
test_index_array[i] = B_test_index_array[i];
test_rank_array[i] = B_test_rank_array[i];
break;
case 'C':
test_index_array[i] = C_test_index_array[i];
test_rank_array[i] = C_test_rank_array[i];
break;
case 'D':
test_index_array[i] = D_test_index_array[i];
test_rank_array[i] = D_test_rank_array[i];
break;
};
/* Printout initial NPB info */
printf
( "\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - IS Benchmark\n\n" );
printf( " Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS );
printf( " Iterations: %d\n", MAX_ITERATIONS );
#ifdef _OPENMP
printf( " Number of available threads: %d\n", omp_get_max_threads() );
#endif
printf( "\n" );
if (timer_on) timer_start( 1 );
/* Generate random number sequence and subsequent keys on all procs */
create_seq( 314159265.00, /* Random number gen seed */
1220703125.00 ); /* Random number gen mult */
alloc_key_buff();
if (timer_on) timer_stop( 1 );
/* Do one interation for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables */
rank( 1 );
/* Start verification counter */
passed_verification = 0;
if( CLASS != 'S' ) printf( "\n iteration\n" );
/* Start timer */
timer_start( 0 );
/* This is the main iteration */
for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
{
if( CLASS != 'S' ) printf( " %d\n", iteration );
rank( iteration );
//EasyCrash: candidates of critical data objs: key\_array, key\_buff1, key\_buff2, bucket\_ptrs
//EasyCrash: critical data objs: bucket\_ptrs
///*
//EasyCrash:
EC(bucket_ptrs, NUM_BUCKETS);
clflush(&iteration);
mfence();
//printf("Something wrong here!\n");
//*/
/*
//checkpoint:
checkpoint(&key_array, sizeof(key_array));
checkpoint(&key_buff1, sizeof(key_buff1));
checkpoint(&key_buff2, sizeof(key_buff2));
checkpoint(&bucket_ptrs, sizeof(bucket_ptrs));
checkpoint(&iteration, sizeof(iteration));
mfence();
*/
}
/* End of timing, obtain maximum time of all processors */
timer_stop( 0 );
timecounter = timer_read( 0 );
/* This tests that keys are in sequence: sorting of last ranked key seq
occurs here, but is an untimed operation */
if (timer_on) timer_start( 2 );
full_verify();
if (timer_on) timer_stop( 2 );
if (timer_on) timer_stop( 3 );
/* The final printout */
/* Expected count: 5 partial checks per timed iteration + 1 full verify. */
if( passed_verification != 5*MAX_ITERATIONS + 1 )
passed_verification = 0;
c_print_results( "IS",
CLASS,
(int)(TOTAL_KEYS/64),
64,
0,
MAX_ITERATIONS,
timecounter,
((double) (MAX_ITERATIONS*TOTAL_KEYS))
/timecounter/1000000.,
"keys ranked",
passed_verification,
NPBVERSION,
COMPILETIME,
CC,
CLINK,
C_LIB,
C_INC,
CFLAGS,
CLINKFLAGS );
/* Print additional timers */
if (timer_on) {
double t_total, t_percent;
t_total = timer_read( 3 );
printf("\nAdditional timers -\n");
printf(" Total execution: %8.3f\n", t_total);
if (t_total == 0.0) t_total = 1.0;
timecounter = timer_read(1);
t_percent = timecounter/t_total * 100.;
printf(" Initialization : %8.3f (%5.2f%%)\n", timecounter, t_percent);
timecounter = timer_read(0);
t_percent = timecounter/t_total * 100.;
printf(" Benchmarking : %8.3f (%5.2f%%)\n", timecounter, t_percent);
timecounter = timer_read(2);
t_percent = timecounter/t_total * 100.;
printf(" Sorting : %8.3f (%5.2f%%)\n", timecounter, t_percent);
}
return 0;
/**************************/
} /* E N D P R O G R A M */
/**************************/
|
GB_unop__identity_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint32_bool
// op(A') function: GB_unop_tran__identity_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
// A (input) entry type
#define GB_ATYPE \
bool
// C (output) entry type
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// access an entry of the output array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator entrywise: Cx [p] = (uint32_t) Ax [p]
// for p = 0..anz-1, parallelized over nthreads OpenMP threads.
// Returns GrB_NO_VALUE when this specialization is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__identity_uint32_bool
(
    uint32_t *Cx, // Cx and Ax may be aliased
    const bool *Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // typecast bool -> uint32_t; the identity op leaves it unchanged
        Cx [p] = (uint32_t) (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast bool->uint32_t, and apply the
   identity operator.  The body is the shared transpose template
   (GB_unop_transpose.c), specialized by the GB_* macros defined above;
   phase 2 fills in the output using the Rowcounts/A_slice partition.
   Returns GrB_NO_VALUE when compiled out (GB_DISABLE). */
GrB_Info GB_unop_tran__identity_uint32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kernel_ecc.h | #pragma omp declare target
void
kernel_ecc(const fp timeinst,
const fp* initvalu,
fp* finavalu,
const int valu_offset,
const fp* params){
//=====================================================================
// VARIABLES
//=====================================================================
// input parameters
fp cycleLength;
// variable references // GET VARIABLES FROM MEMORY AND SAVE LOCALLY !!!!!!!!!!!!!!!!!!
int offset_1;
int offset_2;
int offset_3;
int offset_4;
int offset_5;
int offset_6;
int offset_7;
int offset_8;
int offset_9;
int offset_10;
int offset_11;
int offset_12;
int offset_13;
int offset_14;
int offset_15;
int offset_16;
int offset_17;
int offset_18;
int offset_19;
int offset_20;
int offset_21;
int offset_22;
int offset_23;
int offset_24;
int offset_25;
int offset_26;
int offset_27;
int offset_28;
int offset_29;
int offset_30;
int offset_31;
int offset_32;
int offset_33;
int offset_34;
int offset_35;
int offset_36;
int offset_37;
int offset_38;
int offset_39;
int offset_40;
int offset_41;
int offset_42;
int offset_43;
int offset_44;
int offset_45;
int offset_46;
// stored input array
fp initvalu_1;
fp initvalu_2;
fp initvalu_3;
fp initvalu_4;
fp initvalu_5;
fp initvalu_6;
fp initvalu_7;
fp initvalu_8;
fp initvalu_9;
fp initvalu_10;
fp initvalu_11;
fp initvalu_12;
fp initvalu_13;
fp initvalu_14;
fp initvalu_15;
fp initvalu_16;
fp initvalu_17;
fp initvalu_18;
fp initvalu_19;
fp initvalu_20;
fp initvalu_21;
// fp initvalu_22;
fp initvalu_23;
fp initvalu_24;
fp initvalu_25;
fp initvalu_26;
fp initvalu_27;
fp initvalu_28;
fp initvalu_29;
fp initvalu_30;
fp initvalu_31;
fp initvalu_32;
fp initvalu_33;
fp initvalu_34;
fp initvalu_35;
fp initvalu_36;
fp initvalu_37;
fp initvalu_38;
fp initvalu_39;
fp initvalu_40;
// fp initvalu_41;
// fp initvalu_42;
// fp initvalu_43;
// fp initvalu_44;
// fp initvalu_45;
// fp initvalu_46;
// matlab constants undefined in c
fp pi;
// Constants
fp R; // [J/kmol*K]
fp Frdy; // [C/mol]
fp Temp; // [K] 310
fp FoRT; //
fp Cmem; // [F] membrane capacitance
fp Qpow;
// Cell geometry
fp cellLength; // cell length [um]
fp cellRadius; // cell radius [um]
// fp junctionLength; // junc length [um]
// fp junctionRadius; // junc radius [um]
// fp distSLcyto; // dist. SL to cytosol [um]
// fp distJuncSL; // dist. junc to SL [um]
// fp DcaJuncSL; // Dca junc to SL [cm^2/sec]
// fp DcaSLcyto; // Dca SL to cyto [cm^2/sec]
// fp DnaJuncSL; // Dna junc to SL [cm^2/sec]
// fp DnaSLcyto; // Dna SL to cyto [cm^2/sec]
fp Vcell; // [L]
fp Vmyo;
fp Vsr;
fp Vsl;
fp Vjunc;
// fp SAjunc; // [um^2]
// fp SAsl; // [um^2]
fp J_ca_juncsl; // [L/msec]
fp J_ca_slmyo; // [L/msec]
fp J_na_juncsl; // [L/msec]
fp J_na_slmyo; // [L/msec]
// Fractional currents in compartments
fp Fjunc;
fp Fsl;
fp Fjunc_CaL;
fp Fsl_CaL;
// Fixed ion concentrations
fp Cli; // Intracellular Cl [mM]
fp Clo; // Extracellular Cl [mM]
fp Ko; // Extracellular K [mM]
fp Nao; // Extracellular Na [mM]
fp Cao; // Extracellular Ca [mM]
fp Mgi; // Intracellular Mg [mM]
// Nernst Potentials
fp ena_junc; // [mV]
fp ena_sl; // [mV]
fp ek; // [mV]
fp eca_junc; // [mV]
fp eca_sl; // [mV]
fp ecl; // [mV]
// Na transport parameters
fp GNa; // [mS/uF]
fp GNaB; // [mS/uF]
fp IbarNaK; // [uA/uF]
fp KmNaip; // [mM]
fp KmKo; // [mM]
// fp Q10NaK;
// fp Q10KmNai;
// K current parameters
fp pNaK;
fp GtoSlow; // [mS/uF]
fp GtoFast; // [mS/uF]
fp gkp;
// Cl current parameters
fp GClCa; // [mS/uF]
fp GClB; // [mS/uF]
fp KdClCa; // [mM] // [mM]
// I_Ca parameters
fp pNa; // [cm/sec]
fp pCa; // [cm/sec]
fp pK; // [cm/sec]
// fp KmCa; // [mM]
fp Q10CaL;
// Ca transport parameters
fp IbarNCX; // [uA/uF]
fp KmCai; // [mM]
fp KmCao; // [mM]
fp KmNai; // [mM]
fp KmNao; // [mM]
fp ksat; // [none]
fp nu; // [none]
fp Kdact; // [mM]
fp Q10NCX; // [none]
fp IbarSLCaP; // [uA/uF]
fp KmPCa; // [mM]
fp GCaB; // [uA/uF]
fp Q10SLCaP; // [none] // [none]
// SR flux parameters
fp Q10SRCaP; // [none]
fp Vmax_SRCaP; // [mM/msec] (mmol/L cytosol/msec)
fp Kmf; // [mM]
fp Kmr; // [mM]L cytosol
fp hillSRCaP; // [mM]
fp ks; // [1/ms]
fp koCa; // [mM^-2 1/ms]
fp kom; // [1/ms]
fp kiCa; // [1/mM/ms]
fp kim; // [1/ms]
fp ec50SR; // [mM]
// Buffering parameters
fp Bmax_Naj; // [mM]
fp Bmax_Nasl; // [mM]
fp koff_na; // [1/ms]
fp kon_na; // [1/mM/ms]
fp Bmax_TnClow; // [mM], TnC low affinity
fp koff_tncl; // [1/ms]
fp kon_tncl; // [1/mM/ms]
fp Bmax_TnChigh; // [mM], TnC high affinity
fp koff_tnchca; // [1/ms]
fp kon_tnchca; // [1/mM/ms]
fp koff_tnchmg; // [1/ms]
fp kon_tnchmg; // [1/mM/ms]
// fp Bmax_CaM; // [mM], CaM buffering
// fp koff_cam; // [1/ms]
// fp kon_cam; // [1/mM/ms]
fp Bmax_myosin; // [mM], Myosin buffering
fp koff_myoca; // [1/ms]
fp kon_myoca; // [1/mM/ms]
fp koff_myomg; // [1/ms]
fp kon_myomg; // [1/mM/ms]
fp Bmax_SR; // [mM]
fp koff_sr; // [1/ms]
fp kon_sr; // [1/mM/ms]
fp Bmax_SLlowsl; // [mM], SL buffering
fp Bmax_SLlowj; // [mM]
fp koff_sll; // [1/ms]
fp kon_sll; // [1/mM/ms]
fp Bmax_SLhighsl; // [mM]
fp Bmax_SLhighj; // [mM]
fp koff_slh; // [1/ms]
fp kon_slh; // [1/mM/ms]
fp Bmax_Csqn; // 140e-3*Vmyo/Vsr; [mM]
fp koff_csqn; // [1/ms]
fp kon_csqn; // [1/mM/ms]
// I_Na: Fast Na Current
fp am;
fp bm;
fp ah;
fp bh;
fp aj;
fp bj;
fp I_Na_junc;
fp I_Na_sl;
// fp I_Na;
// I_nabk: Na Background Current
fp I_nabk_junc;
fp I_nabk_sl;
// fp I_nabk;
// I_nak: Na/K Pump Current
fp sigma;
fp fnak;
fp I_nak_junc;
fp I_nak_sl;
fp I_nak;
// I_kr: Rapidly Activating K Current
fp gkr;
fp xrss;
fp tauxr;
fp rkr;
fp I_kr;
// I_ks: Slowly Activating K Current
fp pcaks_junc;
fp pcaks_sl;
fp gks_junc;
fp gks_sl;
fp eks;
fp xsss;
fp tauxs;
fp I_ks_junc;
fp I_ks_sl;
fp I_ks;
// I_kp: Plateau K current
fp kp_kp;
fp I_kp_junc;
fp I_kp_sl;
fp I_kp;
// I_to: Transient Outward K Current (slow and fast components)
fp xtoss;
fp ytoss;
fp rtoss;
fp tauxtos;
fp tauytos;
fp taurtos;
fp I_tos;
//
fp tauxtof;
fp tauytof;
fp I_tof;
fp I_to;
// I_ki: Time-Independent K Current
fp aki;
fp bki;
fp kiss;
fp I_ki;
// I_ClCa: Ca-activated Cl Current, I_Clbk: background Cl Current
fp I_ClCa_junc;
fp I_ClCa_sl;
fp I_ClCa;
fp I_Clbk;
// I_Ca: L-type Calcium Current
fp dss;
fp taud;
fp fss;
fp tauf;
//
fp ibarca_j;
fp ibarca_sl;
fp ibark;
fp ibarna_j;
fp ibarna_sl;
fp I_Ca_junc;
fp I_Ca_sl;
fp I_Ca;
fp I_CaK;
fp I_CaNa_junc;
fp I_CaNa_sl;
// fp I_CaNa;
// fp I_Catot;
// I_ncx: Na/Ca Exchanger flux
fp Ka_junc;
fp Ka_sl;
fp s1_junc;
fp s1_sl;
fp s2_junc;
fp s3_junc;
fp s2_sl;
fp s3_sl;
fp I_ncx_junc;
fp I_ncx_sl;
fp I_ncx;
// I_pca: Sarcolemmal Ca Pump Current
fp I_pca_junc;
fp I_pca_sl;
fp I_pca;
// I_cabk: Ca Background Current
fp I_cabk_junc;
fp I_cabk_sl;
fp I_cabk;
// SR fluxes: Calcium Release, SR Ca pump, SR Ca leak
fp MaxSR;
fp MinSR;
fp kCaSR;
fp koSRCa;
fp kiSRCa;
fp RI;
fp J_SRCarel; // [mM/ms]
fp J_serca;
fp J_SRleak; // [mM/ms]
// Cytosolic Ca Buffers
fp J_CaB_cytosol;
// Junctional and SL Ca Buffers
fp J_CaB_junction;
fp J_CaB_sl;
// SR Ca Concentrations
fp oneovervsr;
// Sodium Concentrations
fp I_Na_tot_junc; // [uA/uF]
fp I_Na_tot_sl; // [uA/uF]
fp oneovervsl;
// Potassium Concentration
fp I_K_tot;
// Calcium Concentrations
fp I_Ca_tot_junc; // [uA/uF]
fp I_Ca_tot_sl; // [uA/uF]
// fp junc_sl;
// fp sl_junc;
// fp sl_myo;
// fp myo_sl;
// Simulation type
int state; // 0-none; 1-pace; 2-vclamp
fp I_app;
fp V_hold;
fp V_test;
fp V_clamp;
fp R_clamp;
// Membrane Potential
fp I_Na_tot; // [uA/uF]
fp I_Cl_tot; // [uA/uF]
fp I_Ca_tot;
fp I_tot;
//=====================================================================
// EXECUTION
//=====================================================================
// input parameters
cycleLength = params[15];
// variable references
offset_1 = valu_offset;
offset_2 = valu_offset+1;
offset_3 = valu_offset+2;
offset_4 = valu_offset+3;
offset_5 = valu_offset+4;
offset_6 = valu_offset+5;
offset_7 = valu_offset+6;
offset_8 = valu_offset+7;
offset_9 = valu_offset+8;
offset_10 = valu_offset+9;
offset_11 = valu_offset+10;
offset_12 = valu_offset+11;
offset_13 = valu_offset+12;
offset_14 = valu_offset+13;
offset_15 = valu_offset+14;
offset_16 = valu_offset+15;
offset_17 = valu_offset+16;
offset_18 = valu_offset+17;
offset_19 = valu_offset+18;
offset_20 = valu_offset+19;
offset_21 = valu_offset+20;
offset_22 = valu_offset+21;
offset_23 = valu_offset+22;
offset_24 = valu_offset+23;
offset_25 = valu_offset+24;
offset_26 = valu_offset+25;
offset_27 = valu_offset+26;
offset_28 = valu_offset+27;
offset_29 = valu_offset+28;
offset_30 = valu_offset+29;
offset_31 = valu_offset+30;
offset_32 = valu_offset+31;
offset_33 = valu_offset+32;
offset_34 = valu_offset+33;
offset_35 = valu_offset+34;
offset_36 = valu_offset+35;
offset_37 = valu_offset+36;
offset_38 = valu_offset+37;
offset_39 = valu_offset+38;
offset_40 = valu_offset+39;
offset_41 = valu_offset+40;
offset_42 = valu_offset+41;
offset_43 = valu_offset+42;
offset_44 = valu_offset+43;
offset_45 = valu_offset+44;
offset_46 = valu_offset+45;
// stored input array
initvalu_1 = initvalu[offset_1];
initvalu_2 = initvalu[offset_2];
initvalu_3 = initvalu[offset_3];
initvalu_4 = initvalu[offset_4];
initvalu_5 = initvalu[offset_5];
initvalu_6 = initvalu[offset_6];
initvalu_7 = initvalu[offset_7];
initvalu_8 = initvalu[offset_8];
initvalu_9 = initvalu[offset_9];
initvalu_10 = initvalu[offset_10];
initvalu_11 = initvalu[offset_11];
initvalu_12 = initvalu[offset_12];
initvalu_13 = initvalu[offset_13];
initvalu_14 = initvalu[offset_14];
initvalu_15 = initvalu[offset_15];
initvalu_16 = initvalu[offset_16];
initvalu_17 = initvalu[offset_17];
initvalu_18 = initvalu[offset_18];
initvalu_19 = initvalu[offset_19];
initvalu_20 = initvalu[offset_20];
initvalu_21 = initvalu[offset_21];
// initvalu_22 = initvalu[offset_22];
initvalu_23 = initvalu[offset_23];
initvalu_24 = initvalu[offset_24];
initvalu_25 = initvalu[offset_25];
initvalu_26 = initvalu[offset_26];
initvalu_27 = initvalu[offset_27];
initvalu_28 = initvalu[offset_28];
initvalu_29 = initvalu[offset_29];
initvalu_30 = initvalu[offset_30];
initvalu_31 = initvalu[offset_31];
initvalu_32 = initvalu[offset_32];
initvalu_33 = initvalu[offset_33];
initvalu_34 = initvalu[offset_34];
initvalu_35 = initvalu[offset_35];
initvalu_36 = initvalu[offset_36];
initvalu_37 = initvalu[offset_37];
initvalu_38 = initvalu[offset_38];
initvalu_39 = initvalu[offset_39];
initvalu_40 = initvalu[offset_40];
// initvalu_41 = initvalu[offset_41];
// initvalu_42 = initvalu[offset_42];
// initvalu_43 = initvalu[offset_43];
// initvalu_44 = initvalu[offset_44];
// initvalu_45 = initvalu[offset_45];
// initvalu_46 = initvalu[offset_46];
// matlab constants undefined in c
pi = 3.1416;
// Constants
R = 8314; // [J/kmol*K]
Frdy = 96485; // [C/mol]
Temp = 310; // [K] 310
FoRT = Frdy/R/Temp; //
Cmem = 1.3810e-10; // [F] membrane capacitance
Qpow = (Temp-310)/10;
// Cell geometry
cellLength = 100; // cell length [um]
cellRadius = 10.25; // cell radius [um]
// junctionLength = 160e-3; // junc length [um]
// junctionRadius = 15e-3; // junc radius [um]
// distSLcyto = 0.45; // dist. SL to cytosol [um]
// distJuncSL = 0.5; // dist. junc to SL [um]
// DcaJuncSL = 1.64e-6; // Dca junc to SL [cm^2/sec]
// DcaSLcyto = 1.22e-6; // Dca SL to cyto [cm^2/sec]
// DnaJuncSL = 1.09e-5; // Dna junc to SL [cm^2/sec]
// DnaSLcyto = 1.79e-5; // Dna SL to cyto [cm^2/sec]
Vcell = pi*powf(cellRadius,(fp)2)*cellLength*1e-15; // [L]
Vmyo = 0.65*Vcell;
Vsr = 0.035*Vcell;
Vsl = 0.02*Vcell;
Vjunc = 0.0539*0.01*Vcell;
// SAjunc = 20150*pi*2*junctionLength*junctionRadius; // [um^2]
// SAsl = pi*2*cellRadius*cellLength; // [um^2]
J_ca_juncsl = 1/1.2134e12; // [L/msec]
J_ca_slmyo = 1/2.68510e11; // [L/msec]
J_na_juncsl = 1/(1.6382e12/3*100); // [L/msec]
J_na_slmyo = 1/(1.8308e10/3*100); // [L/msec]
// Fractional currents in compartments
Fjunc = 0.11;
Fsl = 1-Fjunc;
Fjunc_CaL = 0.9;
Fsl_CaL = 1-Fjunc_CaL;
// Fixed ion concentrations
Cli = 15; // Intracellular Cl [mM]
Clo = 150; // Extracellular Cl [mM]
Ko = 5.4; // Extracellular K [mM]
Nao = 140; // Extracellular Na [mM]
Cao = 1.8; // Extracellular Ca [mM]
Mgi = 1; // Intracellular Mg [mM]
// Nernst Potentials
ena_junc = (1/FoRT)*logf(Nao/initvalu_32); // [mV]
ena_sl = (1/FoRT)*logf(Nao/initvalu_33); // [mV]
ek = (1/FoRT)*logf(Ko/initvalu_35); // [mV]
eca_junc = (1/FoRT/2)*logf(Cao/initvalu_36); // [mV]
eca_sl = (1/FoRT/2)*logf(Cao/initvalu_37); // [mV]
ecl = (1/FoRT)*logf(Cli/Clo); // [mV]
// Na transport parameters
GNa = 16.0; // [mS/uF]
GNaB = 0.297e-3; // [mS/uF]
IbarNaK = 1.90719; // [uA/uF]
KmNaip = 11; // [mM]
KmKo = 1.5; // [mM]
// Q10NaK = 1.63;
// Q10KmNai = 1.39;
// K current parameters
pNaK = 0.01833;
GtoSlow = 0.06; // [mS/uF]
GtoFast = 0.02; // [mS/uF]
gkp = 0.001;
// Cl current parameters
GClCa = 0.109625; // [mS/uF]
GClB = 9e-3; // [mS/uF]
KdClCa = 100e-3; // [mM]
// I_Ca parameters
pNa = 1.5e-8; // [cm/sec]
pCa = 5.4e-4; // [cm/sec]
pK = 2.7e-7; // [cm/sec]
// KmCa = 0.6e-3; // [mM]
Q10CaL = 1.8;
// Ca transport parameters
IbarNCX = 9.0; // [uA/uF]
KmCai = 3.59e-3; // [mM]
KmCao = 1.3; // [mM]
KmNai = 12.29; // [mM]
KmNao = 87.5; // [mM]
ksat = 0.27; // [none]
nu = 0.35; // [none]
Kdact = 0.256e-3; // [mM]
Q10NCX = 1.57; // [none]
IbarSLCaP = 0.0673; // [uA/uF]
KmPCa = 0.5e-3; // [mM]
GCaB = 2.513e-4; // [uA/uF]
Q10SLCaP = 2.35; // [none]
// SR flux parameters
Q10SRCaP = 2.6; // [none]
Vmax_SRCaP = 2.86e-4; // [mM/msec] (mmol/L cytosol/msec)
Kmf = 0.246e-3; // [mM]
Kmr = 1.7; // [mM]L cytosol
hillSRCaP = 1.787; // [mM]
ks = 25; // [1/ms]
koCa = 10; // [mM^-2 1/ms]
kom = 0.06; // [1/ms]
kiCa = 0.5; // [1/mM/ms]
kim = 0.005; // [1/ms]
ec50SR = 0.45; // [mM]
// Buffering parameters
Bmax_Naj = 7.561; // [mM]
Bmax_Nasl = 1.65; // [mM]
koff_na = 1e-3; // [1/ms]
kon_na = 0.1e-3; // [1/mM/ms]
Bmax_TnClow = 70e-3; // [mM], TnC low affinity
koff_tncl = 19.6e-3; // [1/ms]
kon_tncl = 32.7; // [1/mM/ms]
Bmax_TnChigh = 140e-3; // [mM], TnC high affinity
koff_tnchca = 0.032e-3; // [1/ms]
kon_tnchca = 2.37; // [1/mM/ms]
koff_tnchmg = 3.33e-3; // [1/ms]
kon_tnchmg = 3e-3; // [1/mM/ms]
// Bmax_CaM = 24e-3; // [mM], CaM buffering
// koff_cam = 238e-3; // [1/ms]
// kon_cam = 34; // [1/mM/ms]
Bmax_myosin = 140e-3; // [mM], Myosin buffering
koff_myoca = 0.46e-3; // [1/ms]
kon_myoca = 13.8; // [1/mM/ms]
koff_myomg = 0.057e-3; // [1/ms]
kon_myomg = 0.0157; // [1/mM/ms]
Bmax_SR = 19*0.9e-3; // [mM]
koff_sr = 60e-3; // [1/ms]
kon_sr = 100; // [1/mM/ms]
Bmax_SLlowsl = 37.38e-3*Vmyo/Vsl; // [mM], SL buffering
Bmax_SLlowj = 4.62e-3*Vmyo/Vjunc*0.1; // [mM]
koff_sll = 1300e-3; // [1/ms]
kon_sll = 100; // [1/mM/ms]
Bmax_SLhighsl = 13.35e-3*Vmyo/Vsl; // [mM]
Bmax_SLhighj = 1.65e-3*Vmyo/Vjunc*0.1; // [mM]
koff_slh = 30e-3; // [1/ms]
kon_slh = 100; // [1/mM/ms]
Bmax_Csqn = 2.7; // 140e-3*Vmyo/Vsr; [mM]
koff_csqn = 65; // [1/ms]
kon_csqn = 100; // [1/mM/ms]
// I_Na: Fast Na Current
am = 0.32*(initvalu_39+47.13)/(1-expf(-0.1*(initvalu_39+47.13)));
bm = 0.08*expf(-initvalu_39/11);
if(initvalu_39 >= -40){
ah = 0; aj = 0;
bh = 1/(0.13*(1+expf(-(initvalu_39+10.66)/11.1)));
bj = 0.3*expf(-2.535e-7*initvalu_39)/(1+expf(-0.1*(initvalu_39+32)));
}
else{
ah = 0.135*expf((80+initvalu_39)/-6.8);
bh = 3.56*expf(0.079*initvalu_39)+3.1e5*expf(0.35*initvalu_39);
aj = (-127140*expf(0.2444*initvalu_39)-3.474e-5*expf(-0.04391*initvalu_39))*(initvalu_39+37.78)/(1+expf(0.311*(initvalu_39+79.23)));
bj = 0.1212*expf(-0.01052*initvalu_39)/(1+expf(-0.1378*(initvalu_39+40.14)));
}
finavalu[offset_1] = am*(1-initvalu_1)-bm*initvalu_1;
finavalu[offset_2] = ah*(1-initvalu_2)-bh*initvalu_2;
finavalu[offset_3] = aj*(1-initvalu_3)-bj*initvalu_3;
I_Na_junc = Fjunc*GNa*powf(initvalu_1,(fp)3)*initvalu_2*initvalu_3*(initvalu_39-ena_junc);
I_Na_sl = Fsl*GNa*powf(initvalu_1,(fp)3)*initvalu_2*initvalu_3*(initvalu_39-ena_sl);
// I_Na = I_Na_junc+I_Na_sl;
// I_nabk: Na Background Current
I_nabk_junc = Fjunc*GNaB*(initvalu_39-ena_junc);
I_nabk_sl = Fsl*GNaB*(initvalu_39-ena_sl);
// I_nabk = I_nabk_junc+I_nabk_sl;
// I_nak: Na/K Pump Current
sigma = (expf(Nao/67.3)-1)/7;
fnak = 1/(1+0.1245*expf(-0.1*initvalu_39*FoRT)+0.0365*sigma*expf(-initvalu_39*FoRT));
I_nak_junc = Fjunc*IbarNaK*fnak*Ko /(1+powf((KmNaip/initvalu_32),(fp)4)) /(Ko+KmKo);
I_nak_sl = Fsl*IbarNaK*fnak*Ko /(1+powf((KmNaip/initvalu_33),(fp)4)) /(Ko+KmKo);
I_nak = I_nak_junc+I_nak_sl;
// I_kr: Rapidly Activating K Current
gkr = 0.03*sqrtf(Ko/5.4);
xrss = 1/(1+expf(-(initvalu_39+50)/7.5));
tauxr = 1/(0.00138*(initvalu_39+7)/(1-expf(-0.123*(initvalu_39+7)))+6.1e-4*(initvalu_39+10)/(expf(0.145*(initvalu_39+10))-1));
finavalu[offset_12] = (xrss-initvalu_12)/tauxr;
rkr = 1/(1+expf((initvalu_39+33)/22.4));
I_kr = gkr*initvalu_12*rkr*(initvalu_39-ek);
// I_ks: Slowly Activating K Current
pcaks_junc = -log10f(initvalu_36)+3.0;
pcaks_sl = -log10f(initvalu_37)+3.0;
gks_junc = 0.07*(0.057 +0.19/(1+ expf((-7.2+pcaks_junc)/0.6)));
gks_sl = 0.07*(0.057 +0.19/(1+ expf((-7.2+pcaks_sl)/0.6)));
eks = (1/FoRT)*logf((Ko+pNaK*Nao)/(initvalu_35+pNaK*initvalu_34));
xsss = 1/(1+expf(-(initvalu_39-1.5)/16.7));
tauxs = 1/(7.19e-5*(initvalu_39+30)/(1-expf(-0.148*(initvalu_39+30)))+1.31e-4*(initvalu_39+30)/(expf(0.0687*(initvalu_39+30))-1));
finavalu[offset_13] = (xsss-initvalu_13)/tauxs;
I_ks_junc = Fjunc*gks_junc*powf(initvalu_12,(fp)2)*(initvalu_39-eks);
I_ks_sl = Fsl*gks_sl*powf(initvalu_13,(fp)2)*(initvalu_39-eks);
I_ks = I_ks_junc+I_ks_sl;
// I_kp: Plateau K current
kp_kp = 1/(1+expf(7.488-initvalu_39/5.98));
I_kp_junc = Fjunc*gkp*kp_kp*(initvalu_39-ek);
I_kp_sl = Fsl*gkp*kp_kp*(initvalu_39-ek);
I_kp = I_kp_junc+I_kp_sl;
// I_to: Transient Outward K Current (slow and fast components)
xtoss = 1/(1+expf(-(initvalu_39+3.0)/15));
ytoss = 1/(1+expf((initvalu_39+33.5)/10));
rtoss = 1/(1+expf((initvalu_39+33.5)/10));
tauxtos = 9/(1+expf((initvalu_39+3.0)/15))+0.5;
tauytos = 3e3/(1+expf((initvalu_39+60.0)/10))+30;
taurtos = 2800/(1+expf((initvalu_39+60.0)/10))+220;
finavalu[offset_8] = (xtoss-initvalu_8)/tauxtos;
finavalu[offset_9] = (ytoss-initvalu_9)/tauytos;
finavalu[offset_40]= (rtoss-initvalu_40)/taurtos;
I_tos = GtoSlow*initvalu_8*(initvalu_9+0.5*initvalu_40)*(initvalu_39-ek); // [uA/uF]
//
tauxtof = 3.5*expf(-initvalu_39*initvalu_39/30/30)+1.5;
tauytof = 20.0/(1+expf((initvalu_39+33.5)/10))+20.0;
finavalu[offset_10] = (xtoss-initvalu_10)/tauxtof;
finavalu[offset_11] = (ytoss-initvalu_11)/tauytof;
I_tof = GtoFast*initvalu_10*initvalu_11*(initvalu_39-ek);
I_to = I_tos + I_tof;
// I_ki: Time-Independent K Current
aki = 1.02/(1+expf(0.2385*(initvalu_39-ek-59.215)));
bki =(0.49124*expf(0.08032*(initvalu_39+5.476-ek)) + expf(0.06175*(initvalu_39-ek-594.31))) /(1 + expf(-0.5143*(initvalu_39-ek+4.753)));
kiss = aki/(aki+bki);
I_ki = 0.9*sqrtf(Ko/5.4)*kiss*(initvalu_39-ek);
// I_ClCa: Ca-activated Cl Current, I_Clbk: background Cl Current
I_ClCa_junc = Fjunc*GClCa/(1+KdClCa/initvalu_36)*(initvalu_39-ecl);
I_ClCa_sl = Fsl*GClCa/(1+KdClCa/initvalu_37)*(initvalu_39-ecl);
I_ClCa = I_ClCa_junc+I_ClCa_sl;
I_Clbk = GClB*(initvalu_39-ecl);
// I_Ca: L-type Calcium Current
dss = 1/(1+expf(-(initvalu_39+14.5)/6.0));
taud = dss*(1-expf(-(initvalu_39+14.5)/6.0))/(0.035*(initvalu_39+14.5));
fss = 1/(1+expf((initvalu_39+35.06)/3.6))+0.6/(1+expf((50-initvalu_39)/20));
tauf = 1/(0.0197*expf(-powf(0.0337*(initvalu_39+14.5),2.0))+0.02); // double-precision version of pow
finavalu[offset_4] = (dss-initvalu_4)/taud;
finavalu[offset_5] = (fss-initvalu_5)/tauf;
finavalu[offset_6] = 1.7*initvalu_36*(1-initvalu_6)-11.9e-3*initvalu_6; // fCa_junc
finavalu[offset_7] = 1.7*initvalu_37*(1-initvalu_7)-11.9e-3*initvalu_7; // fCa_sl
//
ibarca_j = pCa*4*(initvalu_39*Frdy*FoRT) * (0.341*initvalu_36*expf(2*initvalu_39*FoRT)-0.341*Cao) /(expf(2*initvalu_39*FoRT)-1);
ibarca_sl = pCa*4*(initvalu_39*Frdy*FoRT) * (0.341*initvalu_37*expf(2*initvalu_39*FoRT)-0.341*Cao) /(expf(2*initvalu_39*FoRT)-1);
ibark = pK*(initvalu_39*Frdy*FoRT)*(0.75*initvalu_35*expf(initvalu_39*FoRT)-0.75*Ko) /(expf(initvalu_39*FoRT)-1);
ibarna_j = pNa*(initvalu_39*Frdy*FoRT) *(0.75*initvalu_32*expf(initvalu_39*FoRT)-0.75*Nao) /(expf(initvalu_39*FoRT)-1);
ibarna_sl = pNa*(initvalu_39*Frdy*FoRT) *(0.75*initvalu_33*expf(initvalu_39*FoRT)-0.75*Nao) /(expf(initvalu_39*FoRT)-1);
I_Ca_junc = (Fjunc_CaL*ibarca_j*initvalu_4*initvalu_5*(1-initvalu_6)*powf(Q10CaL,Qpow))*0.45;
I_Ca_sl = (Fsl_CaL*ibarca_sl*initvalu_4*initvalu_5*(1-initvalu_7)*powf(Q10CaL,Qpow))*0.45;
I_Ca = I_Ca_junc+I_Ca_sl;
finavalu[offset_43]=-I_Ca*Cmem/(Vmyo*2*Frdy)*1e3;
I_CaK = (ibark*initvalu_4*initvalu_5*(Fjunc_CaL*(1-initvalu_6)+Fsl_CaL*(1-initvalu_7))*powf(Q10CaL,Qpow))*0.45;
I_CaNa_junc = (Fjunc_CaL*ibarna_j*initvalu_4*initvalu_5*(1-initvalu_6)*powf(Q10CaL,Qpow))*0.45;
I_CaNa_sl = (Fsl_CaL*ibarna_sl*initvalu_4*initvalu_5*(1-initvalu_7)*powf(Q10CaL,Qpow))*0.45;
// I_CaNa = I_CaNa_junc+I_CaNa_sl;
// I_Catot = I_Ca+I_CaK+I_CaNa;
// I_ncx: Na/Ca Exchanger flux
Ka_junc = 1/(1+powf((Kdact/initvalu_36),(fp)3));
Ka_sl = 1/(1+powf((Kdact/initvalu_37),(fp)3));
s1_junc = expf(nu*initvalu_39*FoRT)*powf(initvalu_32,(fp)3)*Cao;
s1_sl = expf(nu*initvalu_39*FoRT)*powf(initvalu_33,(fp)3)*Cao;
s2_junc = expf((nu-1)*initvalu_39*FoRT)*powf(Nao,(fp)3)*initvalu_36;
s3_junc = (KmCai*powf(Nao,(fp)3)*(1+powf((initvalu_32/KmNai),(fp)3))+powf(KmNao,(fp)3)*initvalu_36+ powf(KmNai,(fp)3)*Cao*(1+initvalu_36/KmCai)+KmCao*powf(initvalu_32,(fp)3)+powf(initvalu_32,(fp)3)*Cao+powf(Nao,(fp)3)*initvalu_36)*(1+ksat*expf((nu-1)*initvalu_39*FoRT));
s2_sl = expf((nu-1)*initvalu_39*FoRT)*powf(Nao,(fp)3)*initvalu_37;
s3_sl = (KmCai*powf(Nao,(fp)3)*(1+powf((initvalu_33/KmNai),(fp)3)) + powf(KmNao,(fp)3)*initvalu_37+powf(KmNai,(fp)3)*Cao*(1+initvalu_37/KmCai)+KmCao*powf(initvalu_33,(fp)3)+powf(initvalu_33,(fp)3)*Cao+powf(Nao,(fp)3)*initvalu_37)*(1+ksat*expf((nu-1)*initvalu_39*FoRT));
I_ncx_junc = Fjunc*IbarNCX*powf(Q10NCX,Qpow)*Ka_junc*(s1_junc-s2_junc)/s3_junc;
I_ncx_sl = Fsl*IbarNCX*powf(Q10NCX,Qpow)*Ka_sl*(s1_sl-s2_sl)/s3_sl;
I_ncx = I_ncx_junc+I_ncx_sl;
finavalu[offset_45]=2*I_ncx*Cmem/(Vmyo*2*Frdy)*1e3;
// I_pca: Sarcolemmal Ca Pump Current
I_pca_junc = Fjunc*powf(Q10SLCaP,Qpow)*IbarSLCaP*powf(initvalu_36,(fp)(1.6))/(powf(KmPCa,(fp)(1.6))+powf(initvalu_36,(fp)(1.6)));
I_pca_sl = Fsl*powf(Q10SLCaP,Qpow)*IbarSLCaP*powf(initvalu_37,(fp)(1.6))/(powf(KmPCa,(fp)(1.6))+powf(initvalu_37,(fp)(1.6)));
I_pca = I_pca_junc+I_pca_sl;
finavalu[offset_44]=-I_pca*Cmem/(Vmyo*2*Frdy)*1e3;
// I_cabk: Ca Background Current
I_cabk_junc = Fjunc*GCaB*(initvalu_39-eca_junc);
I_cabk_sl = Fsl*GCaB*(initvalu_39-eca_sl);
I_cabk = I_cabk_junc+I_cabk_sl;
finavalu[offset_46]=-I_cabk*Cmem/(Vmyo*2*Frdy)*1e3;
// SR fluxes: Calcium Release, SR Ca pump, SR Ca leak
MaxSR = 15;
MinSR = 1;
kCaSR = MaxSR - (MaxSR-MinSR)/(1+powf(ec50SR/initvalu_31,(fp)(2.5)));
koSRCa = koCa/kCaSR;
kiSRCa = kiCa*kCaSR;
RI = 1-initvalu_14-initvalu_15-initvalu_16;
finavalu[offset_14] = (kim*RI-kiSRCa*initvalu_36*initvalu_14)-(koSRCa*powf(initvalu_36,(fp)2)*initvalu_14-kom*initvalu_15); // R
finavalu[offset_15] = (koSRCa*powf(initvalu_36,(fp)2)*initvalu_14-kom*initvalu_15)-(kiSRCa*initvalu_36*initvalu_15-kim*initvalu_16); // O
finavalu[offset_16] = (kiSRCa*initvalu_36*initvalu_15-kim*initvalu_16)-(kom*initvalu_16-koSRCa*powf(initvalu_36,(fp)2)*RI); // I
J_SRCarel = ks*initvalu_15*(initvalu_31-initvalu_36); // [mM/ms]
J_serca = powf(Q10SRCaP,Qpow)*Vmax_SRCaP*(powf((initvalu_38/Kmf),hillSRCaP)-powf((initvalu_31/Kmr),hillSRCaP))
/(1+powf((initvalu_38/Kmf),hillSRCaP)+powf((initvalu_31/Kmr),hillSRCaP));
J_SRleak = 5.348e-6*(initvalu_31-initvalu_36); // [mM/ms]
// Sodium and Calcium Buffering
finavalu[offset_17] = kon_na*initvalu_32*(Bmax_Naj-initvalu_17)-koff_na*initvalu_17; // NaBj [mM/ms]
finavalu[offset_18] = kon_na*initvalu_33*(Bmax_Nasl-initvalu_18)-koff_na*initvalu_18; // NaBsl [mM/ms]
// Cytosolic Ca Buffers
finavalu[offset_19] = kon_tncl*initvalu_38*(Bmax_TnClow-initvalu_19)-koff_tncl*initvalu_19; // TnCL [mM/ms]
finavalu[offset_20] = kon_tnchca*initvalu_38*(Bmax_TnChigh-initvalu_20-initvalu_21)-koff_tnchca*initvalu_20; // TnCHc [mM/ms]
finavalu[offset_21] = kon_tnchmg*Mgi*(Bmax_TnChigh-initvalu_20-initvalu_21)-koff_tnchmg*initvalu_21; // TnCHm [mM/ms]
finavalu[offset_22] = 0; // CaM [mM/ms]
finavalu[offset_23] = kon_myoca*initvalu_38*(Bmax_myosin-initvalu_23-initvalu_24)-koff_myoca*initvalu_23; // Myosin_ca [mM/ms]
finavalu[offset_24] = kon_myomg*Mgi*(Bmax_myosin-initvalu_23-initvalu_24)-koff_myomg*initvalu_24; // Myosin_mg [mM/ms]
finavalu[offset_25] = kon_sr*initvalu_38*(Bmax_SR-initvalu_25)-koff_sr*initvalu_25; // SRB [mM/ms]
J_CaB_cytosol = finavalu[offset_19] + finavalu[offset_20] + finavalu[offset_21] + finavalu[offset_22] + finavalu[offset_23] + finavalu[offset_24] + finavalu[offset_25];
// Junctional and SL Ca Buffers
finavalu[offset_26] = kon_sll*initvalu_36*(Bmax_SLlowj-initvalu_26)-koff_sll*initvalu_26; // SLLj [mM/ms]
finavalu[offset_27] = kon_sll*initvalu_37*(Bmax_SLlowsl-initvalu_27)-koff_sll*initvalu_27; // SLLsl [mM/ms]
finavalu[offset_28] = kon_slh*initvalu_36*(Bmax_SLhighj-initvalu_28)-koff_slh*initvalu_28; // SLHj [mM/ms]
finavalu[offset_29] = kon_slh*initvalu_37*(Bmax_SLhighsl-initvalu_29)-koff_slh*initvalu_29; // SLHsl [mM/ms]
J_CaB_junction = finavalu[offset_26]+finavalu[offset_28];
J_CaB_sl = finavalu[offset_27]+finavalu[offset_29];
// SR Ca Concentrations
finavalu[offset_30] = kon_csqn*initvalu_31*(Bmax_Csqn-initvalu_30)-koff_csqn*initvalu_30; // Csqn [mM/ms]
oneovervsr = 1/Vsr;
finavalu[offset_31] = J_serca*Vmyo*oneovervsr-(J_SRleak*Vmyo*oneovervsr+J_SRCarel)-finavalu[offset_30]; // Ca_sr [mM/ms] %Ratio 3 leak current
// Sodium Concentrations
I_Na_tot_junc = I_Na_junc+I_nabk_junc+3*I_ncx_junc+3*I_nak_junc+I_CaNa_junc; // [uA/uF]
I_Na_tot_sl = I_Na_sl+I_nabk_sl+3*I_ncx_sl+3*I_nak_sl+I_CaNa_sl; // [uA/uF]
finavalu[offset_32] = -I_Na_tot_junc*Cmem/(Vjunc*Frdy)+J_na_juncsl/Vjunc*(initvalu_33-initvalu_32)-finavalu[offset_17];
oneovervsl = 1/Vsl;
finavalu[offset_33] = -I_Na_tot_sl*Cmem*oneovervsl/Frdy+J_na_juncsl*oneovervsl*(initvalu_32-initvalu_33)+J_na_slmyo*oneovervsl*(initvalu_34-initvalu_33)-finavalu[offset_18];
finavalu[offset_34] = J_na_slmyo/Vmyo*(initvalu_33-initvalu_34); // [mM/msec]
// Potassium Concentration
I_K_tot = I_to+I_kr+I_ks+I_ki-2*I_nak+I_CaK+I_kp; // [uA/uF]
finavalu[offset_35] = 0; // [mM/msec]
// Calcium Concentrations
I_Ca_tot_junc = I_Ca_junc+I_cabk_junc+I_pca_junc-2*I_ncx_junc; // [uA/uF]
I_Ca_tot_sl = I_Ca_sl+I_cabk_sl+I_pca_sl-2*I_ncx_sl; // [uA/uF]
finavalu[offset_36] = -I_Ca_tot_junc*Cmem/(Vjunc*2*Frdy)+J_ca_juncsl/Vjunc*(initvalu_37-initvalu_36)
- J_CaB_junction+(J_SRCarel)*Vsr/Vjunc+J_SRleak*Vmyo/Vjunc; // Ca_j
finavalu[offset_37] = -I_Ca_tot_sl*Cmem/(Vsl*2*Frdy)+J_ca_juncsl/Vsl*(initvalu_36-initvalu_37)
+ J_ca_slmyo/Vsl*(initvalu_38-initvalu_37)-J_CaB_sl; // Ca_sl
finavalu[offset_38] = -J_serca-J_CaB_cytosol +J_ca_slmyo/Vmyo*(initvalu_37-initvalu_38);
// junc_sl=J_ca_juncsl/Vsl*(initvalu_36-initvalu_37);
// sl_junc=J_ca_juncsl/Vjunc*(initvalu_37-initvalu_36);
// sl_myo=J_ca_slmyo/Vsl*(initvalu_38-initvalu_37);
// myo_sl=J_ca_slmyo/Vmyo*(initvalu_37-initvalu_38);
// Simulation type
state = 1;
switch(state){
case 0:
I_app = 0;
break;
case 1: // pace w/ current injection at cycleLength 'cycleLength'
if(fmod(timeinst,cycleLength) <= 5){
I_app = 9.5;
}
else{
I_app = 0.0;
}
break;
case 2:
V_hold = -55;
V_test = 0;
if(timeinst>0.5 & timeinst<200.5){
V_clamp = V_test;
}
else{
V_clamp = V_hold;
}
R_clamp = 0.04;
I_app = (V_clamp-initvalu_39)/R_clamp;
break;
}
// Membrane Potential
I_Na_tot = I_Na_tot_junc + I_Na_tot_sl; // [uA/uF]
I_Cl_tot = I_ClCa+I_Clbk; // [uA/uF]
I_Ca_tot = I_Ca_tot_junc+I_Ca_tot_sl;
I_tot = I_Na_tot+I_Cl_tot+I_Ca_tot+I_K_tot;
finavalu[offset_39] = -(I_tot-I_app);
// Set unused output values to 0 (MATLAB does it by default)
finavalu[offset_41] = 0;
finavalu[offset_42] = 0;
}
#pragma omp end declare target
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include "avx.h"
#include "avx2.h"
#include "avx_type.h"
#include "gravity.h"
/* Number of i-particles handled per kernel call: the two halves of each
 * 8-wide float vector carry one i-particle each. */
#define IPARA 2
/* Number of j-particles packed into one SIMD group (one 4-double vector,
 * or half of an 8-float vector). */
#define JPARA 4
/* Factors applied to the stored acceleration/jerk when predicting
 * velocities (see avx_predict_j_particle). Consistent with ptcl_mem.acc[]
 * and .jrk[] holding a/2 and jerk/6 Hermite-style — TODO confirm with the
 * code that fills them via avx_set_j_particle. */
#define SEC 2.0
#define THD 3.0
/* Capacity (in particles) of the static j-particle stores below. */
#define JMEMSIZE 262144
#define ALIGN32 __attribute__ ((aligned(32)))
#define ALIGN128 __attribute__ ((aligned(128)))
#define ALIGN256 __attribute__ ((aligned(256)))
/* Third-order Taylor predictor: value at t+dt from value x, derivative v,
 * half second derivative aby2 and one-sixth third derivative jby6. */
#define predict(dt, x, v, aby2, jby6) \
((x) + (v) * (dt) + (aby2) * (dt) * (dt) + (jby6) * (dt) * (dt) * (dt))
/* Vector constants for the VRSQRTPS Newton-Raphson refinement used in the
 * AVX kernels (NVECS is defined in an included header, presumably 8). */
static float ALIGN32 three[NVECS] = {3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0};
static float ALIGN32 threefourth[NVECS] = {0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75};
/* All-ones (1.0f) vector; not referenced in this part of the file —
 * presumably used by the neighbour-flag kernels further down. */
static float ALIGN32 flag[NVECS] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
/* Prediction epoch, set by avx_set_ti(). NOTE(review): the name collides
 * with time() should <time.h> ever be included here. */
static double time;
/* Per-pipeline neighbour-list lengths and index lists (NPIPES/MAXLEN come
 * from an included header); read back via avx_get_neighbourlist(). */
static int nblen[NPIPES];
static int nbl[NPIPES][MAXLEN];
/* Sticky overflow/error flag, cleared by avx_initialize_neighbourlist()
 * and reported by avx_get_neighbourlist_error(). */
static int nblerror;
/* Master copy of each j-particle, written by avx_set_j_particle(). */
static struct Ptcl_Mem{
double pos[3];   /* position at time tim */
double vel[3];   /* velocity at time tim */
double acc[3];   /* presumably acceleration/2 — see SEC above, confirm */
double jrk[3];   /* presumably jerk/6 — see THD above, confirm */
double mss;      /* mass */
double tim;      /* time at which pos/vel/acc/jrk are valid */
int idx;         /* particle id, compared against i-particle ids to skip self */
int pad[3];      /* pad struct to a 32-byte multiple */
} ptcl_mem[JMEMSIZE] ALIGN128;
typedef struct Pred_Mem * pPred_Mem;
/* Predicted j-particles, one entry per JPARA-sized group: positions kept
 * in double (NVECD lanes), index/mass/velocity down-converted to float
 * with each j duplicated into lanes jj and jj+JPARA so one 8-float vector
 * serves both i-particle halves (see avx_predict_j_particle). */
static struct Pred_Mem{
double xpos[NVECD], ypos[NVECD], zpos[NVECD];
float indx[NVECS], mass[NVECS];
float xvel[NVECS], yvel[NVECS], zvel[NVECS];
} pred_mem[JMEMSIZE] ALIGN256;
typedef struct NeighbourList * pNeighbourList;
/* Per-thread neighbour-candidate flags; allocated in avx_open(). */
static struct NeighbourList{
float flag[NVECS];
} (*neighbour)[JMEMSIZE];
typedef struct Iparticle * pIparticle;
/* Scratch block holding the broadcast i-particle data plus the vector
 * accumulators, laid out as 32-byte rows for aligned VSTOR/VLOAD. */
struct Iparticle{
double xpos0[NVECD], xpos1[NVECD];
double ypos0[NVECD], ypos1[NVECD];
double zpos0[NVECD], zpos1[NVECD];
float xvel01[NVECS], yvel01[NVECS], zvel01[NVECS];
float id01[NVECS], veps2[NVECS];
double xacc[NVECD], yacc[NVECD], zacc[NVECD], pot[NVECD];
float xjrk[NVECS], yjrk[NVECS], zjrk[NVECS];
float rmin2[NVECS], in[NVECS];
float hinv[NVECS];
};
/* Number of 32-byte rows in struct Iparticle (6+3+2+4+3+2+1 = 21,
 * assuming NVECD == 4 and NVECS == 8) — used to size the scratch
 * allocation in the kernels below. */
#define NVAR_IP 21
/* Debug aid: dump the x-components of the stored acceleration and jerk
 * for the first 1024 entries of ptcl_mem (fixed count, regardless of how
 * many particles were actually set — presumably enough for inspection). */
void avx_debugfunc(void)
{
    int i;
    for(i = 0; i < 1024; i++){
        printf("%4d %+.13E %+.13E\n", i, ptcl_mem[i].acc[0], ptcl_mem[i].jrk[0]);
    }
}
void avx_open(int nthread)
{
int ret;
ret = posix_memalign((void **)&neighbour, 32, sizeof(struct NeighbourList) * JMEMSIZE * nthread);
assert(ret == 0);
return;
}
/* Release the neighbour-flag tables allocated by avx_open(). */
void avx_close(void)
{
    free(neighbour);
}
void avx_set_j_particle(int padr, int pidx, double tim, double mss,
double *pos, double *vel, double *acc, double *jrk)
{
ptcl_mem[padr].pos[0] = pos[0];
ptcl_mem[padr].pos[1] = pos[1];
ptcl_mem[padr].pos[2] = pos[2];
ptcl_mem[padr].vel[0] = vel[0];
ptcl_mem[padr].vel[1] = vel[1];
ptcl_mem[padr].vel[2] = vel[2];
ptcl_mem[padr].acc[0] = acc[0];
ptcl_mem[padr].acc[1] = acc[1];
ptcl_mem[padr].acc[2] = acc[2];
ptcl_mem[padr].jrk[0] = jrk[0];
ptcl_mem[padr].jrk[1] = jrk[1];
ptcl_mem[padr].jrk[2] = jrk[2];
ptcl_mem[padr].mss = mss;
ptcl_mem[padr].tim = tim;
ptcl_mem[padr].idx = pidx;
return;
}
/* Record the system time used as the prediction epoch by
 * avx_predict_j_particle(). */
void avx_set_ti(double tim)
{
    time = tim;
}
/* Clear the sticky neighbour-list error flag before a new force pass. */
void avx_initialize_neighbourlist(void)
{
    nblerror = 0;
}
/* Report the sticky neighbour-list error flag (nonzero after an
 * overflow/error since the last avx_initialize_neighbourlist()). */
int avx_get_neighbourlist_error(void)
{
    return nblerror;
}
/* Copy pipeline ipipe's neighbour list into the caller's buffers.
 * Writes the list length to *nblenfunc and the indices to nblfunc[].
 * Returns 0 on success, 1 if the stored list is longer than maxlen
 * (in which case nothing is copied). */
int avx_get_neighbourlist(int ipipe, int maxlen, int *nblenfunc, int *nblfunc)
{
    int count = nblen[ipipe];
    int k;
    if(count > maxlen)
        return 1;
    *nblenfunc = count;
    for(k = 0; k < count; k++)
        nblfunc[k] = nbl[ipipe][k];
    return 0;
}
/*
 * Predict all nj j-particles from their stored state at ptcl_mem[].tim to
 * the current epoch `time`, filling pred_mem[] in the SIMD layout the AVX
 * kernels expect: positions stay double (one per lane jj), while index,
 * mass and velocity are down-converted to float and duplicated into lanes
 * jj and jj+JPARA so one 8-float vector serves both i-particle halves.
 * Velocity prediction reuses the `predict` macro with SEC*acc and THD*jrk
 * as the lower-order terms — consistent with acc[]/jrk[] holding a/2 and
 * jerk/6 (TODO confirm against the code that fills ptcl_mem).
 * A partial final group is padded with far-away (1e3) positions and zero
 * mass; stale index/velocity lanes in the pad are harmless because every
 * force/potential term carries the (zero) mass factor.
 */
void avx_predict_j_particle(int nj)
{
    int j, jmod;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(j = 0; j < nj; j += JPARA){
        int jmem = j / JPARA;       /* destination SIMD group */
        int jj;
        for(jj = 0; jj < JPARA; jj++){
            int jadr = j + jj;      /* source particle slot */
            int j2 = jj + JPARA;    /* duplicate float lane in the upper half */
            double dt = time - ptcl_mem[jadr].tim;
            /* position: third-order predictor, kept in double */
            pred_mem[jmem].xpos[jj] = predict(dt, ptcl_mem[jadr].pos[0], ptcl_mem[jadr].vel[0], ptcl_mem[jadr].acc[0], ptcl_mem[jadr].jrk[0]);
            pred_mem[jmem].ypos[jj] = predict(dt, ptcl_mem[jadr].pos[1], ptcl_mem[jadr].vel[1], ptcl_mem[jadr].acc[1], ptcl_mem[jadr].jrk[1]);
            pred_mem[jmem].zpos[jj] = predict(dt, ptcl_mem[jadr].pos[2], ptcl_mem[jadr].vel[2], ptcl_mem[jadr].acc[2], ptcl_mem[jadr].jrk[2]);
            /* id and mass as float, duplicated into both vector halves */
            pred_mem[jmem].indx[jj] = pred_mem[jmem].indx[j2] = (float)ptcl_mem[jadr].idx;
            pred_mem[jmem].mass[jj] = pred_mem[jmem].mass[j2] = (float)ptcl_mem[jadr].mss;
            /* velocity: second-order predictor via the same macro */
            pred_mem[jmem].xvel[jj] = predict(dt, ptcl_mem[jadr].vel[0], SEC * ptcl_mem[jadr].acc[0], THD * ptcl_mem[jadr].jrk[0], 0.0);
            pred_mem[jmem].xvel[j2] = pred_mem[jmem].xvel[jj];
            pred_mem[jmem].yvel[jj] = predict(dt, ptcl_mem[jadr].vel[1], SEC * ptcl_mem[jadr].acc[1], THD * ptcl_mem[jadr].jrk[1], 0.0);
            pred_mem[jmem].yvel[j2] = pred_mem[jmem].yvel[jj];
            pred_mem[jmem].zvel[jj] = predict(dt, ptcl_mem[jadr].vel[2], SEC * ptcl_mem[jadr].acc[2], THD * ptcl_mem[jadr].jrk[2], 0.0);
            pred_mem[jmem].zvel[j2] = pred_mem[jmem].zvel[jj];
        }
    }
    /* Pad the tail of the last, partially filled group so the vector
     * kernels can always consume whole JPARA-sized groups. */
    if((jmod = nj % JPARA) != 0){
        int jj;
        int jmem = nj / JPARA;
        for(jj = JPARA - 1; jj >= jmod; jj--){
            pred_mem[jmem].xpos[jj] = 1e3;  /* park padding far away */
            pred_mem[jmem].ypos[jj] = 1e3;
            pred_mem[jmem].zpos[jj] = 1e3;
            pred_mem[jmem].mass[jj] = 0.0;  /* zero mass => zero contribution */
            pred_mem[jmem].mass[jj+4] = 0.0;
        }
    }
    return;
}
/*
 * Scalar (non-SIMD) reference force loop: for each of the IPARA
 * i-particles in posvel, accumulate softened gravitational acceleration,
 * jerk and potential over the nj predicted j-particles in pred_mem[].
 *   acc += m*dr/r^3
 *   jrk += m*dv/r^3 - 3*m*(dr.dv)/r^2 * dr/r^3
 *   pot -= m/r
 * with r^2 = |dr|^2 + eps2. A j-particle whose (float) index equals the
 * i-particle's id is skipped (self-interaction). The arithmetic is kept
 * in the exact order of the original implementation so results match
 * bit for bit.
 */
void gravity_kernels(int nj, double eps2, pPrdPosVel posvel, pNewAccJrk accjerk)
{
    int i;
    for(i = 0; i < IPARA; i++){
        float self_id = posvel[i].id;
        double xi = posvel[i].xpos;
        double yi = posvel[i].ypos;
        double zi = posvel[i].zpos;
        double vxi = posvel[i].xvel;
        double vyi = posvel[i].yvel;
        double vzi = posvel[i].zvel;
        double acx = 0.0, acy = 0.0, acz = 0.0;
        double jkx = 0.0, jky = 0.0, jkz = 0.0;
        double phi = 0.0;
        pPred_Mem grp;
        int j;
        for(j = 0, grp = pred_mem; j < nj; j += JPARA, grp++){
            int lane;
            for(lane = 0; lane < JPARA; lane++){
                double dx, dy, dz;
                double dvx, dvy, dvz;
                double r2, rv, inv_r2, m_over_r, m_over_r3;
                if(grp->indx[lane] == self_id)
                    continue;   /* skip self-interaction */
                dx = grp->xpos[lane] - xi;
                dy = grp->ypos[lane] - yi;
                dz = grp->zpos[lane] - zi;
                dvx = grp->xvel[lane] - vxi;
                dvy = grp->yvel[lane] - vyi;
                dvz = grp->zvel[lane] - vzi;
                r2 = dx * dx + dy * dy + dz * dz + eps2;
                rv = dx * dvx + dy * dvy + dz * dvz;
                inv_r2 = 1.0 / r2;
                m_over_r = sqrt(inv_r2);
                rv *= inv_r2 * 3.0;             /* 3*(dr.dv)/r^2 */
                m_over_r *= grp->mass[lane];
                m_over_r3 = m_over_r * inv_r2;  /* m/r^3 */
                phi -= m_over_r;
                /* reuse dr/dv as scaled partials, exactly as the original */
                dx *= m_over_r3; acx += dx;
                dy *= m_over_r3; acy += dy;
                dz *= m_over_r3; acz += dz;
                dvx *= m_over_r3; jkx += dvx;
                dvy *= m_over_r3; jky += dvy;
                dvz *= m_over_r3; jkz += dvz;
                dx *= rv; jkx -= dx;
                dy *= rv; jky -= dy;
                dz *= rv; jkz -= dz;
            }
        }
        accjerk[i].xacc = acx;
        accjerk[i].yacc = acy;
        accjerk[i].zacc = acz;
        accjerk[i].xjrk = jkx;
        accjerk[i].yjrk = jky;
        accjerk[i].zjrk = jkz;
        accjerk[i].pot = phi;
    }
}
/*
 * AVX force loop for exactly two i-particles (posvel[0], posvel[1])
 * against nj predicted j-particles in pred_mem[], writing acceleration,
 * jerk and potential into accjerk[0..1]. Each 8-float vector carries
 * i-particle 0 in its lower four lanes and i-particle 1 in its upper
 * four, against the same group of JPARA j-particles.
 * Assumes avx_predict_j_particle() has been called (so a partial final
 * group is zero-mass padded). The macro semantics are taken at face
 * value from their x86 namesakes (avx.h/avx2.h).
 * Register roles inside the loop: YMM09 = potential accumulator (double),
 * YMM10/11/12 = x/y/z acceleration accumulators (double),
 * YMM13/14/15 = x/y/z jerk accumulators (float).
 * NOTE(review): the accumulated quantities inherit constant factors from
 * the -2/r reciprocal produced by the Newton-Raphson step below (pot in
 * units of -2m/r, acc/jerk scaled accordingly); presumably the library
 * front end removes these factors — confirm against the caller.
 * NOTE(review): scratch is posix_memalign'd and freed on every call.
 */
void gravity_kernel(int nj, pPrdPosVel posvel, pNewAccJrk accjerk)
{
    int ret;
    int j;
    pPred_Mem jptr = pred_mem;
    pIparticle iptr;
    /* 32-byte-aligned scratch for the broadcast i-data and accumulators */
    ret = posix_memalign((void **)&iptr, 32, NVAR_IP * 32);
    assert(ret == 0);
    /* Broadcast i-particle positions (double) ... */
    VBROADCASTSD(posvel[0].xpos, YMM00);
    VBROADCASTSD(posvel[0].ypos, YMM01);
    VBROADCASTSD(posvel[0].zpos, YMM02);
    VBROADCASTSD(posvel[1].xpos, YMM03);
    VBROADCASTSD(posvel[1].ypos, YMM04);
    VBROADCASTSD(posvel[1].zpos, YMM05);
    /* ... and velocities/id/eps2 (float), i0 in the low half, i1 in the
     * high half of each 8-float vector. */
    VBROADCASTSS(posvel[0].xvel, XMM06);
    VBROADCASTSS(posvel[1].xvel, XMM07);
    VMERGE(YMM06, YMM07, YMM06);
    VBROADCASTSS(posvel[0].yvel, XMM08);
    VBROADCASTSS(posvel[1].yvel, XMM09);
    VMERGE(YMM08, YMM09, YMM07);
    VBROADCASTSS(posvel[0].zvel, XMM10);
    VBROADCASTSS(posvel[1].zvel, XMM11);
    VMERGE(YMM10, YMM11, YMM08);
    VBROADCASTSS(posvel[0].id, XMM12);
    VBROADCASTSS(posvel[1].id, XMM13);
    VMERGE(YMM12, YMM13, YMM09);
    VBROADCASTSS(posvel[0].eps2, XMM14);
    VBROADCASTSS(posvel[1].eps2, XMM15);
    VMERGE(YMM14, YMM15, YMM10);
    /* Park the broadcast values in scratch so the loop can reload them. */
    VSTORPD(YMM00, iptr->xpos0[0]);
    VSTORPD(YMM01, iptr->ypos0[0]);
    VSTORPD(YMM02, iptr->zpos0[0]);
    VSTORPD(YMM03, iptr->xpos1[0]);
    VSTORPD(YMM04, iptr->ypos1[0]);
    VSTORPD(YMM05, iptr->zpos1[0]);
    VSTORPS(YMM06, iptr->xvel01[0]);
    VSTORPS(YMM07, iptr->yvel01[0]);
    VSTORPS(YMM08, iptr->zvel01[0]);
    VSTORPS(YMM09, iptr->id01[0]);
    VSTORPS(YMM10, iptr->veps2[0]);
    /* Clear all accumulators (YMM09..YMM15 among them). */
    VZEROALL;
    for(j = 0; j < nj; j += JPARA, jptr++){ // if nj % 2 != 0 ATARU
        /* dx -> YMM03: (xj - xi) computed in double for both i-particles,
         * rounded to float and merged into one 8-lane vector. */
        VLOADPD(jptr->xpos[0], YMM00);
        VSUBPD_M(iptr->xpos0[0], YMM00, YMM01);
        VCVTPD2PS(YMM01, XMM01);
        VSUBPD_M(iptr->xpos1[0], YMM00, YMM02);
        VCVTPD2PS(YMM02, XMM02);
        VMERGE(YMM01, YMM02, YMM03);
        // dy -> YMM04
        VLOADPD(jptr->ypos[0], YMM00);
        VSUBPD_M(iptr->ypos0[0], YMM00, YMM01);
        VCVTPD2PS(YMM01, XMM01);
        VSUBPD_M(iptr->ypos1[0], YMM00, YMM02);
        VCVTPD2PS(YMM02, XMM02);
        VMERGE(YMM01, YMM02, YMM04);
        // dz -> YMM05
        VLOADPD(jptr->zpos[0], YMM00);
        VSUBPD_M(iptr->zpos0[0], YMM00, YMM01);
        VCVTPD2PS(YMM01, XMM01);
        VSUBPD_M(iptr->zpos1[0], YMM00, YMM02);
        VCVTPD2PS(YMM02, XMM02);
        VMERGE(YMM01, YMM02, YMM05);
        /* dr^2 = eps2 + dx^2 + dy^2 + dz^2 -> YMM01 */
        VLOADPS(iptr->veps2[0], YMM01);
        VFMADDPS(YMM01, YMM03, YMM03);
        VFMADDPS(YMM01, YMM04, YMM04);
        VFMADDPS(YMM01, YMM05, YMM05);
        /* - 2 / r -> YMM01: rsqrt estimate y refined by one
         * Newton-Raphson step in the form y*(y^2*r^2 - 3) = -2/r. */
        VRSQRTPS(YMM01, YMM02);
        VMULPS(YMM02, YMM01, YMM01);
        VFMSUB213PS_M(three[0], YMM02, YMM01);
        VMULPS(YMM02, YMM01, YMM01);
        /* mask: zero lanes where the j index equals this half's i id,
         * suppressing the self-interaction. */
        VLOADPS(jptr->indx[0], YMM02);
        VLOADPS(iptr->id01[0], YMM00);
        VCMPNEQPS(YMM00, YMM02, YMM02);
        VANDPS(YMM02, YMM01, YMM01);
        /* potential: accumulate m * (-2/r), widened to double and
         * pairwise-summed into YMM09 (host code untangles the lanes). */
        VMULPS_M(jptr->mass[0], YMM01, YMM02);
        VCVTPS2PD(XMM02, YMM00);
        VUP2LOW(YMM02, XMM06);
        VCVTPS2PD(XMM06, YMM06);
        VHADDPD(YMM06, YMM00, YMM07);
        VADDPD(YMM07, YMM09, YMM09);
        // dvx, dvy, dvz (vj - vi)
        VLOADPS(jptr->xvel[0], YMM06);
        VSUBPS_M(iptr->xvel01[0], YMM06, YMM06);
        VLOADPS(jptr->yvel[0], YMM07);
        VSUBPS_M(iptr->yvel01[0], YMM07, YMM07);
        VLOADPS(jptr->zvel[0], YMM08);
        VSUBPS_M(iptr->zvel01[0], YMM08, YMM08);
        /* xv = dr.dv -> YMM00 */
        VMULPS(YMM03, YMM06, YMM00);
        VFMADDPS(YMM00, YMM04, YMM07);
        VFMADDPS(YMM00, YMM05, YMM08);
        /* YMM00: 3.0 * xv / r^2, YMM02: - m / r^3 (up to the constant
         * factors carried by the -2/r base — see header NOTE). */
        VMULPS_M(jptr->mass[0], YMM01, YMM02);
        VMULPS(YMM01, YMM01, YMM01);
        VMULPS(YMM01, YMM00, YMM00);
        VMULPS(YMM01, YMM02, YMM02);
        VMULPS_M(threefourth[0], YMM00, YMM00);
        /* prefetch the next j-group while the FP pipes are busy */
        PREFETCH((jptr+1)->xpos[0]);
        PREFETCH((jptr+1)->zpos[0]);
        PREFETCH((jptr+1)->mass[0]);
        PREFETCH((jptr+1)->yvel[0]);
        /* jerk, velocity part: jrk += (m/r^3 term) * dv */
        VFMADDPS(YMM13, YMM02, YMM06);
        VFMADDPS(YMM14, YMM02, YMM07);
        VFMADDPS(YMM15, YMM02, YMM08);
        /* ax: acc partial dx*(m/r^3 term), widened to double and
         * pairwise-summed into YMM10 */
        VMULPS(YMM02, YMM03, YMM03);
        VCVTPS2PD(XMM03, YMM06);
        VUP2LOW(YMM03, XMM07);
        VCVTPS2PD(XMM07, YMM07);
        VHADDPD(YMM07, YMM06, YMM06);
        VADDPD(YMM06, YMM10, YMM10);
        // ay
        VMULPS(YMM02, YMM04, YMM04);
        VCVTPS2PD(XMM04, YMM06);
        VUP2LOW(YMM04, XMM07);
        VCVTPS2PD(XMM07, YMM07);
        VHADDPD(YMM07, YMM06, YMM06);
        VADDPD(YMM06, YMM11, YMM11);
        // az
        VMULPS(YMM02, YMM05, YMM05);
        VCVTPS2PD(XMM05, YMM06);
        VUP2LOW(YMM05, XMM07);
        VCVTPS2PD(XMM07, YMM07);
        VHADDPD(YMM07, YMM06, YMM06);
        VADDPD(YMM06, YMM12, YMM12);
        /* jerk, radial part: jrk -= (3*xv/r^2) * acc partial */
        VFNMADDPS(YMM13, YMM00, YMM03);
        VFNMADDPS(YMM14, YMM00, YMM04);
        VFNMADDPS(YMM15, YMM00, YMM05);
    }
    /* Spill the accumulators and reduce lanes on the host side:
     * double accumulators interleave i0 in lanes {0,2} and i1 in {1,3}
     * (a consequence of the VHADDPD pattern above); float jerk keeps
     * i0 in lanes 0-3 and i1 in lanes 4-7. */
    VSTORPD(YMM09, iptr->pot[0]);
    VSTORPD(YMM10, iptr->xacc[0]);
    VSTORPD(YMM11, iptr->yacc[0]);
    VSTORPD(YMM12, iptr->zacc[0]);
    VSTORPS(YMM13, iptr->xjrk[0]);
    VSTORPS(YMM14, iptr->yjrk[0]);
    VSTORPS(YMM15, iptr->zjrk[0]);
    VZEROUPPER;
    accjerk[0].xacc = iptr->xacc[0] + iptr->xacc[2];
    accjerk[0].yacc = iptr->yacc[0] + iptr->yacc[2];
    accjerk[0].zacc = iptr->zacc[0] + iptr->zacc[2];
    accjerk[0].pot = iptr->pot[0] + iptr->pot[2];
    accjerk[0].xjrk = iptr->xjrk[0] + iptr->xjrk[1] + iptr->xjrk[2] + iptr->xjrk[3];
    accjerk[0].yjrk = iptr->yjrk[0] + iptr->yjrk[1] + iptr->yjrk[2] + iptr->yjrk[3];
    accjerk[0].zjrk = iptr->zjrk[0] + iptr->zjrk[1] + iptr->zjrk[2] + iptr->zjrk[3];
    accjerk[1].xacc = iptr->xacc[1] + iptr->xacc[3];
    accjerk[1].yacc = iptr->yacc[1] + iptr->yacc[3];
    accjerk[1].zacc = iptr->zacc[1] + iptr->zacc[3];
    accjerk[1].pot = iptr->pot[1] + iptr->pot[3];
    accjerk[1].xjrk = iptr->xjrk[4] + iptr->xjrk[5] + iptr->xjrk[6] + iptr->xjrk[7];
    accjerk[1].yjrk = iptr->yjrk[4] + iptr->yjrk[5] + iptr->yjrk[6] + iptr->yjrk[7];
    accjerk[1].zjrk = iptr->zjrk[4] + iptr->zjrk[5] + iptr->zjrk[6] + iptr->zjrk[7];
    free(iptr);
    return;
}
/*
 * gravity_kernel2 -- direct-summation gravity kernel written with AVX
 * inline-assembly macros (VLOADPD / VFMADDPS / VMERGE / ... are project
 * macros operating on the hand-allocated YMM/XMM register file).
 *
 * For the two predicted i-particles in posvel[0..1] it accumulates, over
 * the nj j-particles stored in the global pred_mem array (JPARA j's per
 * loop pass), the softened potential, acceleration (double-precision
 * accumulators) and jerk (single precision), and additionally tracks each
 * i-particle's nearest neighbour via the per-lane rmin2 / in arrays.
 *
 * Parameters:
 *   nj      - number of j-particles; the loop assumes nj is a multiple of
 *             JPARA (see the "if nj % 2 != 0 ATARU" note on the loop).
 *   posvel  - the two predicted i-particle states (read only).
 *   accjerk - two output records: pot, x/y/zacc, x/y/zjrk, plus the
 *             nearest-neighbour distance proxy rnnb (= -2/r) and index nnb.
 *
 * NOTE(review): register assignments and instruction order are tuned by
 * hand (YMM00-YMM15 stay live across macro calls), so the code below is
 * left byte-identical and only comments were added.
 */
void gravity_kernel2(int nj, pPrdPosVel posvel, pNewAccJrk accjerk)
{
int ret;
int j;
double true_rmin2;
pPred_Mem jptr = pred_mem;
pIparticle iptr;
/* seeds: rmin2 lanes start at 10.0 (large (-2/r) sentinel) and the
 * neighbour-index lanes at -1.0 ("none found yet") */
float ten = 10.0, minusone = -1.0;
/* 32-byte-aligned scratch area holding i-particle state + accumulators */
ret = posix_memalign((void **)&iptr, 32, NVAR_IP * 32);
assert(ret == 0);
/* broadcast i-particle positions (double) and pack the two particles'
 * float quantities into low/high YMM halves via VMERGE */
VBROADCASTSD(posvel[0].xpos, YMM00);
VBROADCASTSD(posvel[0].ypos, YMM01);
VBROADCASTSD(posvel[0].zpos, YMM02);
VBROADCASTSD(posvel[1].xpos, YMM03);
VBROADCASTSD(posvel[1].ypos, YMM04);
VBROADCASTSD(posvel[1].zpos, YMM05);
VBROADCASTSS(posvel[0].xvel, XMM06);
VBROADCASTSS(posvel[1].xvel, XMM07);
VMERGE(YMM06, YMM07, YMM06);
VBROADCASTSS(posvel[0].yvel, XMM08);
VBROADCASTSS(posvel[1].yvel, XMM09);
VMERGE(YMM08, YMM09, YMM07);
VBROADCASTSS(posvel[0].zvel, XMM10);
VBROADCASTSS(posvel[1].zvel, XMM11);
VMERGE(YMM10, YMM11, YMM08);
VBROADCASTSS(posvel[0].id, XMM12);
VBROADCASTSS(posvel[1].id, XMM13);
VMERGE(YMM12, YMM13, YMM09);
VBROADCASTSS(posvel[0].eps2, XMM14);
VBROADCASTSS(posvel[1].eps2, XMM15);
VMERGE(YMM14, YMM15, YMM10);
VBROADCASTSS(ten, YMM11);
VBROADCASTSS(minusone, YMM12);
/* spill the broadcast i-particle state into the aligned scratch block so
 * the j-loop can reload it from memory operands */
VSTORPD(YMM00, iptr->xpos0[0]);
VSTORPD(YMM01, iptr->ypos0[0]);
VSTORPD(YMM02, iptr->zpos0[0]);
VSTORPD(YMM03, iptr->xpos1[0]);
VSTORPD(YMM04, iptr->ypos1[0]);
VSTORPD(YMM05, iptr->zpos1[0]);
VSTORPS(YMM06, iptr->xvel01[0]);
VSTORPS(YMM07, iptr->yvel01[0]);
VSTORPS(YMM08, iptr->zvel01[0]);
VSTORPS(YMM09, iptr->id01[0]);
VSTORPS(YMM10, iptr->veps2[0]);
VSTORPS(YMM11, iptr->rmin2[0]);
VSTORPS(YMM12, iptr->in[0]);
/* clear all YMM accumulators (pot: YMM09, acc: YMM10-12, jerk: YMM13-15) */
VZEROALL;
for(j = 0; j < nj; j += JPARA, jptr++){ // if nj % 2 != 0 ATARU
// dx -> YMM03
VLOADPD(jptr->xpos[0], YMM00);
VSUBPD_M(iptr->xpos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->xpos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM03);
// dy -> YMM04
VLOADPD(jptr->ypos[0], YMM00);
VSUBPD_M(iptr->ypos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->ypos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM04);
// dz -> YMM05
VLOADPD(jptr->zpos[0], YMM00);
VSUBPD_M(iptr->zpos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->zpos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM05);
// dr^2 (softened: accumulated on top of eps^2)
VLOADPS(iptr->veps2[0], YMM01);
VFMADDPS(YMM01, YMM03, YMM03);
VFMADDPS(YMM01, YMM04, YMM04);
VFMADDPS(YMM01, YMM05, YMM05);
// - 2 / r -> YMM01
// NOTE(review): looks like the rsqrt estimate plus one Newton-Raphson
// refinement step folded with the three[] constant - confirm against
// the macro definitions before touching this sequence.
VRSQRTPS(YMM01, YMM02);
VMULPS(YMM02, YMM01, YMM01);
VFMSUB213PS_M(three[0], YMM02, YMM01);
VMULPS(YMM02, YMM01, YMM01);
// mask: zero out lanes where the j-particle is one of the i-particles
VLOADPS(jptr->indx[0], YMM02);
VLOADPS(iptr->id01[0], YMM00);
VCMPNEQPS(YMM00, YMM02, YMM02);
VANDPS(YMM02, YMM01, YMM01);
// nearest neighbour (free: YMM00, YMM02, YMM06, YMM07, YMM08)
// keep the smallest -2/r per lane and carry the matching j index in in[]
VLOADPS(iptr->rmin2[0], YMM00);
VMINPS(YMM01, YMM00, YMM02);
VSTORPS(YMM02, iptr->rmin2[0]);
VCMPPS(YMM01, YMM00, YMM02, GT);
VLOADPS(jptr->indx[0], YMM06);
VANDPS(YMM02, YMM06, YMM07);
VCMPPS(YMM01, YMM00, YMM08, LE);
VANDPS_M(iptr->in[0], YMM08, YMM08);
VADDPS(YMM08, YMM07, YMM07);
VSTORPS(YMM07, iptr->in[0]);
// potential: widen m*(-2/r) to double and fold into YMM09
VMULPS_M(jptr->mass[0], YMM01, YMM02);
VCVTPS2PD(XMM02, YMM00);
VUP2LOW(YMM02, XMM06);
VCVTPS2PD(XMM06, YMM06);
VHADDPD(YMM06, YMM00, YMM07);
VADDPD(YMM07, YMM09, YMM09);
// dvx, dvy, dvz (vj - vi)
VLOADPS(jptr->xvel[0], YMM06);
VSUBPS_M(iptr->xvel01[0], YMM06, YMM06);
VLOADPS(jptr->yvel[0], YMM07);
VSUBPS_M(iptr->yvel01[0], YMM07, YMM07);
VLOADPS(jptr->zvel[0], YMM08);
VSUBPS_M(iptr->zvel01[0], YMM08, YMM08);
// xv -> YMM00
VMULPS(YMM03, YMM06, YMM00);
VFMADDPS(YMM00, YMM04, YMM07);
VFMADDPS(YMM00, YMM05, YMM08);
// YMM00: 3.0 * xv / r^2, YMM02: - m / r^3
VMULPS_M(jptr->mass[0], YMM01, YMM02);
VMULPS(YMM01, YMM01, YMM01);
VMULPS(YMM01, YMM00, YMM00);
VMULPS(YMM01, YMM02, YMM02);
VMULPS_M(threefourth[0], YMM00, YMM00);
// prefetch the next j-particle's cache lines
PREFETCH((jptr+1)->xpos[0]);
PREFETCH((jptr+1)->zpos[0]);
PREFETCH((jptr+1)->mass[0]);
PREFETCH((jptr+1)->yvel[0]);
// jx1, jy1, jz1 (velocity-dependent jerk term)
VFMADDPS(YMM13, YMM02, YMM06);
VFMADDPS(YMM14, YMM02, YMM07);
VFMADDPS(YMM15, YMM02, YMM08);
// ax (widen to double, horizontal add, accumulate in YMM10)
VMULPS(YMM02, YMM03, YMM03);
VCVTPS2PD(XMM03, YMM06);
VUP2LOW(YMM03, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM10, YMM10);
// ay
VMULPS(YMM02, YMM04, YMM04);
VCVTPS2PD(XMM04, YMM06);
VUP2LOW(YMM04, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM11, YMM11);
// az
VMULPS(YMM02, YMM05, YMM05);
VCVTPS2PD(XMM05, YMM06);
VUP2LOW(YMM05, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM12, YMM12);
// jx2, jy2, jz2 (position-dependent jerk term)
VFNMADDPS(YMM13, YMM00, YMM03);
VFNMADDPS(YMM14, YMM00, YMM04);
VFNMADDPS(YMM15, YMM00, YMM05);
}
/* spill the accumulators and reduce the lanes in scalar code.
 * NOTE(review): the sibling kernel above issues VZEROUPPER before its
 * scalar epilogue, presumably to avoid AVX->SSE transition penalties;
 * confirm whether it was intentionally omitted here. */
VSTORPD(YMM09, iptr->pot[0]);
VSTORPD(YMM10, iptr->xacc[0]);
VSTORPD(YMM11, iptr->yacc[0]);
VSTORPD(YMM12, iptr->zacc[0]);
VSTORPS(YMM13, iptr->xjrk[0]);
VSTORPS(YMM14, iptr->yjrk[0]);
VSTORPS(YMM15, iptr->zjrk[0]);
/* i-particle 0: lanes {0,2} (double) resp. {0..3} (float) */
accjerk[0].xacc = iptr->xacc[0] + iptr->xacc[2];
accjerk[0].yacc = iptr->yacc[0] + iptr->yacc[2];
accjerk[0].zacc = iptr->zacc[0] + iptr->zacc[2];
accjerk[0].pot = iptr->pot[0] + iptr->pot[2];
accjerk[0].xjrk = iptr->xjrk[0] + iptr->xjrk[1] + iptr->xjrk[2] + iptr->xjrk[3];
accjerk[0].yjrk = iptr->yjrk[0] + iptr->yjrk[1] + iptr->yjrk[2] + iptr->yjrk[3];
accjerk[0].zjrk = iptr->zjrk[0] + iptr->zjrk[1] + iptr->zjrk[2] + iptr->zjrk[3];
/* nearest neighbour of i-particle 0: scan its JPARA lanes */
for(true_rmin2 = 1e30, j = 0; j < JPARA; j++){
if(iptr->rmin2[j] < true_rmin2){
true_rmin2 = iptr->rmin2[j];
accjerk[0].rnnb = - 2.0 / true_rmin2;
accjerk[0].nnb = (int)iptr->in[j];
}
}
/* i-particle 1: lanes {1,3} (double) resp. {4..7} (float) */
accjerk[1].xacc = iptr->xacc[1] + iptr->xacc[3];
accjerk[1].yacc = iptr->yacc[1] + iptr->yacc[3];
accjerk[1].zacc = iptr->zacc[1] + iptr->zacc[3];
accjerk[1].pot = iptr->pot[1] + iptr->pot[3];
accjerk[1].xjrk = iptr->xjrk[4] + iptr->xjrk[5] + iptr->xjrk[6] + iptr->xjrk[7];
accjerk[1].yjrk = iptr->yjrk[4] + iptr->yjrk[5] + iptr->yjrk[6] + iptr->yjrk[7];
accjerk[1].zjrk = iptr->zjrk[4] + iptr->zjrk[5] + iptr->zjrk[6] + iptr->zjrk[7];
for(true_rmin2 = 1e30, j = 4; j < 4 + JPARA; j++){
if(iptr->rmin2[j] < true_rmin2){
true_rmin2 = iptr->rmin2[j];
accjerk[1].rnnb = - 2.0 / true_rmin2;
accjerk[1].nnb = (int)iptr->in[j];
}
}
free(iptr);
return;
}
/*
 * gravity_kerneln -- same AVX direct-summation kernel as gravity_kernel2
 * but additionally records a neighbour list instead of the single nearest
 * neighbour: every j-particle with -2/r <= -2/h (i.e. r <= h) is flagged
 * per lane in the per-thread neighbour[ithread] scratch and gathered into
 * nbl[i] / nbl[i+1] in the scalar epilogue.
 *
 * Parameters:
 *   nj      - number of j-particles (assumed multiple of JPARA).
 *   posvel  - two predicted i-particle states; h2 is the squared
 *             neighbour-sphere radius (0 disables via the -1e10 sentinel).
 *   accjerk - two output pot/acc/jerk records.
 *   i       - index of the first i-particle; neighbour results go to
 *             nbl[i], nbl[i+1], nblen[i], nblen[i+1].
 *   ithread - selects the per-thread neighbour-flag scratch buffer.
 *
 * NOTE(review): instruction order is hand-tuned; code left byte-identical,
 * comments only.
 */
void gravity_kerneln(int nj, pPrdPosVel posvel, pNewAccJrk accjerk, int i, int ithread)
{
int ret;
int j;
float hinv0, hinv1;
pPred_Mem jptr = pred_mem;
pIparticle iptr;
pNeighbourList nbptr, nbptr0 = neighbour[ithread];
/* -2/h per i-particle; h2 == 0 means "no neighbour sphere", encoded as a
 * sentinel that no -2/r value can reach */
if(posvel[0].h2 == 0.0)
hinv0 = - 1e10;
else
hinv0 = - 2.0 / sqrt(posvel[0].h2);
if(posvel[1].h2 == 0.0)
hinv1 = - 1e10;
else
hinv1 = - 2.0 / sqrt(posvel[1].h2);
/* 32-byte-aligned scratch area for i-particle state + accumulators */
ret = posix_memalign((void **)&iptr, 32, NVAR_IP * 32);
assert(ret == 0);
/* broadcast i-particle state; VMERGE packs the two particles' float
 * quantities into the low/high YMM halves */
VBROADCASTSD(posvel[0].xpos, YMM00);
VBROADCASTSD(posvel[0].ypos, YMM01);
VBROADCASTSD(posvel[0].zpos, YMM02);
VBROADCASTSD(posvel[1].xpos, YMM03);
VBROADCASTSD(posvel[1].ypos, YMM04);
VBROADCASTSD(posvel[1].zpos, YMM05);
VBROADCASTSS(posvel[0].xvel, XMM06);
VBROADCASTSS(posvel[1].xvel, XMM07);
VMERGE(YMM06, YMM07, YMM06);
VBROADCASTSS(posvel[0].yvel, XMM08);
VBROADCASTSS(posvel[1].yvel, XMM09);
VMERGE(YMM08, YMM09, YMM07);
VBROADCASTSS(posvel[0].zvel, XMM10);
VBROADCASTSS(posvel[1].zvel, XMM11);
VMERGE(YMM10, YMM11, YMM08);
VBROADCASTSS(posvel[0].id, XMM12);
VBROADCASTSS(posvel[1].id, XMM13);
VMERGE(YMM12, YMM13, YMM09);
VBROADCASTSS(posvel[0].eps2, XMM14);
VBROADCASTSS(posvel[1].eps2, XMM15);
VMERGE(YMM14, YMM15, YMM10);
VBROADCASTSS(hinv0, XMM11);
VBROADCASTSS(hinv1, XMM12);
VMERGE(YMM11, YMM12, YMM11);
/* spill broadcast state to the aligned scratch block */
VSTORPD(YMM00, iptr->xpos0[0]);
VSTORPD(YMM01, iptr->ypos0[0]);
VSTORPD(YMM02, iptr->zpos0[0]);
VSTORPD(YMM03, iptr->xpos1[0]);
VSTORPD(YMM04, iptr->ypos1[0]);
VSTORPD(YMM05, iptr->zpos1[0]);
VSTORPS(YMM06, iptr->xvel01[0]);
VSTORPS(YMM07, iptr->yvel01[0]);
VSTORPS(YMM08, iptr->zvel01[0]);
VSTORPS(YMM09, iptr->id01[0]);
VSTORPS(YMM10, iptr->veps2[0]);
VSTORPS(YMM11, iptr->hinv[0]);
/* clear accumulators (pot: YMM09, acc: YMM10-12, jerk: YMM13-15) */
VZEROALL;
for(j = 0, nbptr = nbptr0; j < nj; j += JPARA, jptr++, nbptr++){ // if nj % 2 != 0 ATARU
// dx -> YMM03
VLOADPD(jptr->xpos[0], YMM00);
VSUBPD_M(iptr->xpos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->xpos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM03);
// dy -> YMM04
VLOADPD(jptr->ypos[0], YMM00);
VSUBPD_M(iptr->ypos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->ypos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM04);
// dz -> YMM05
VLOADPD(jptr->zpos[0], YMM00);
VSUBPD_M(iptr->zpos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->zpos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM05);
// dr^2 (softened: accumulated on top of eps^2)
VLOADPS(iptr->veps2[0], YMM01);
VFMADDPS(YMM01, YMM03, YMM03);
VFMADDPS(YMM01, YMM04, YMM04);
VFMADDPS(YMM01, YMM05, YMM05);
// - 2 / r -> YMM01 (rsqrt estimate + refinement; see macro definitions)
VRSQRTPS(YMM01, YMM02);
VMULPS(YMM02, YMM01, YMM01);
VFMSUB213PS_M(three[0], YMM02, YMM01);
VMULPS(YMM02, YMM01, YMM01);
// mask: zero out lanes where the j-particle is one of the i-particles
VLOADPS(jptr->indx[0], YMM02);
VLOADPS(iptr->id01[0], YMM00);
VCMPNEQPS(YMM00, YMM02, YMM02);
VANDPS(YMM01, YMM01, YMM01);
// neighbour list: flag lanes with -2/r <= -2/h (i.e. r <= h)
VLOADPS(iptr->hinv[0], YMM00);
VCMPPS(YMM00, YMM01, YMM00, LE);
VLOADPS(flag[0], YMM02);
VANDPS(YMM02, YMM00, YMM00);
VSTORPS(YMM00, nbptr->flag[0]);
// potential: widen m*(-2/r) to double and fold into YMM09
VMULPS_M(jptr->mass[0], YMM01, YMM02);
VCVTPS2PD(XMM02, YMM00);
VUP2LOW(YMM02, XMM06);
VCVTPS2PD(XMM06, YMM06);
VHADDPD(YMM06, YMM00, YMM07);
VADDPD(YMM07, YMM09, YMM09);
// dvx, dvy, dvz (vj - vi)
VLOADPS(jptr->xvel[0], YMM06);
VSUBPS_M(iptr->xvel01[0], YMM06, YMM06);
VLOADPS(jptr->yvel[0], YMM07);
VSUBPS_M(iptr->yvel01[0], YMM07, YMM07);
VLOADPS(jptr->zvel[0], YMM08);
VSUBPS_M(iptr->zvel01[0], YMM08, YMM08);
// xv -> YMM00
VMULPS(YMM03, YMM06, YMM00);
VFMADDPS(YMM00, YMM04, YMM07);
VFMADDPS(YMM00, YMM05, YMM08);
// YMM00: 3.0 * xv / r^2, YMM02: - m / r^3
VMULPS_M(jptr->mass[0], YMM01, YMM02);
VMULPS(YMM01, YMM01, YMM01);
VMULPS(YMM01, YMM00, YMM00);
VMULPS(YMM01, YMM02, YMM02);
VMULPS_M(threefourth[0], YMM00, YMM00);
// prefetch the next j-particle's cache lines
PREFETCH((jptr+1)->xpos[0]);
PREFETCH((jptr+1)->zpos[0]);
PREFETCH((jptr+1)->mass[0]);
PREFETCH((jptr+1)->yvel[0]);
// jx1, jy1, jz1 (velocity-dependent jerk term)
VFMADDPS(YMM13, YMM02, YMM06);
VFMADDPS(YMM14, YMM02, YMM07);
VFMADDPS(YMM15, YMM02, YMM08);
// ax (widen to double, horizontal add, accumulate in YMM10)
VMULPS(YMM02, YMM03, YMM03);
VCVTPS2PD(XMM03, YMM06);
VUP2LOW(YMM03, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM10, YMM10);
// ay
VMULPS(YMM02, YMM04, YMM04);
VCVTPS2PD(XMM04, YMM06);
VUP2LOW(YMM04, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM11, YMM11);
// az
VMULPS(YMM02, YMM05, YMM05);
VCVTPS2PD(XMM05, YMM06);
VUP2LOW(YMM05, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM12, YMM12);
// jx2, jy2, jz2 (position-dependent jerk term)
VFNMADDPS(YMM13, YMM00, YMM03);
VFNMADDPS(YMM14, YMM00, YMM04);
VFNMADDPS(YMM15, YMM00, YMM05);
}
/* spill accumulators and reduce lanes in scalar code.
 * NOTE(review): no VZEROUPPER before the scalar epilogue here, unlike the
 * first kernel in this file - confirm whether that is intentional. */
VSTORPD(YMM09, iptr->pot[0]);
VSTORPD(YMM10, iptr->xacc[0]);
VSTORPD(YMM11, iptr->yacc[0]);
VSTORPD(YMM12, iptr->zacc[0]);
VSTORPS(YMM13, iptr->xjrk[0]);
VSTORPS(YMM14, iptr->yjrk[0]);
VSTORPS(YMM15, iptr->zjrk[0]);
/* i-particle 0: lanes {0,2} (double) resp. {0..3} (float) */
accjerk[0].xacc = iptr->xacc[0] + iptr->xacc[2];
accjerk[0].yacc = iptr->yacc[0] + iptr->yacc[2];
accjerk[0].zacc = iptr->zacc[0] + iptr->zacc[2];
accjerk[0].pot = iptr->pot[0] + iptr->pot[2];
accjerk[0].xjrk = iptr->xjrk[0] + iptr->xjrk[1] + iptr->xjrk[2] + iptr->xjrk[3];
accjerk[0].yjrk = iptr->yjrk[0] + iptr->yjrk[1] + iptr->yjrk[2] + iptr->yjrk[3];
accjerk[0].zjrk = iptr->zjrk[0] + iptr->zjrk[1] + iptr->zjrk[2] + iptr->zjrk[3];
/* i-particle 1: lanes {1,3} (double) resp. {4..7} (float) */
accjerk[1].xacc = iptr->xacc[1] + iptr->xacc[3];
accjerk[1].yacc = iptr->yacc[1] + iptr->yacc[3];
accjerk[1].zacc = iptr->zacc[1] + iptr->zacc[3];
accjerk[1].pot = iptr->pot[1] + iptr->pot[3];
accjerk[1].xjrk = iptr->xjrk[4] + iptr->xjrk[5] + iptr->xjrk[6] + iptr->xjrk[7];
accjerk[1].yjrk = iptr->yjrk[4] + iptr->yjrk[5] + iptr->yjrk[6] + iptr->yjrk[7];
accjerk[1].zjrk = iptr->zjrk[4] + iptr->zjrk[5] + iptr->zjrk[6] + iptr->zjrk[7];
/* gather the flagged neighbours: lanes 0..3 belong to i-particle i,
 * lanes 4..7 to i-particle i+1 */
int jj;
int nn0, nn1;
for(nn0 = nn1 = 0, j = 0, jptr = pred_mem, nbptr = nbptr0; j < nj; j += JPARA, jptr++, nbptr++){
for(jj = 0; jj < JPARA; jj++)
if(nbptr->flag[jj] == 1.0){
nbl[i][nn0] = (int)jptr->indx[jj];
++nn0;
}
for(jj = 4; jj < JPARA + 4; jj++)
if(nbptr->flag[jj] == 1.0){
nbl[i+1][nn1] = (int)jptr->indx[jj];
++nn1;
}
}
/* overflow flag checked after the fact: the lists above may already have
 * been written past MAXLEN when this triggers */
if(nn0 > MAXLEN || nn1 > MAXLEN)
nblerror = 1;
nblen[i] = nn0;
nblen[i+1] = nn1;
free(iptr);
return;
}
/*
 * gravity_kernel2n -- combination of gravity_kernel2 and gravity_kerneln:
 * the AVX direct-summation pot/acc/jerk kernel that tracks BOTH the
 * nearest neighbour (rmin2 / in lanes, reduced into accjerk[].rnnb/nnb)
 * AND the full neighbour list (flags in neighbour[ithread], gathered into
 * nbl[i] / nbl[i+1]).
 *
 * Parameters:
 *   nj      - number of j-particles (assumed multiple of JPARA).
 *   posvel  - two predicted i-particle states; h2 is the squared
 *             neighbour-sphere radius (0 disables via the -1e10 sentinel).
 *   accjerk - two output records incl. rnnb (= -2/r of nearest) and nnb.
 *   i       - index of the first i-particle for nbl / nblen output.
 *   ithread - selects the per-thread neighbour-flag scratch buffer.
 *
 * NOTE(review): instruction order is hand-tuned; code left byte-identical,
 * comments only.
 */
void gravity_kernel2n(int nj, pPrdPosVel posvel, pNewAccJrk accjerk, int i, int ithread)
{
int ret;
int j;
double true_rmin2;
float hinv0, hinv1;
pPred_Mem jptr = pred_mem;
pIparticle iptr;
pNeighbourList nbptr, nbptr0 = neighbour[ithread];
/* seeds for the nearest-neighbour lanes (see gravity_kernel2) */
float ten = 10.0, minusone = -1.0;
/* -2/h per i-particle; h2 == 0 disables the neighbour sphere */
if(posvel[0].h2 == 0.0)
hinv0 = - 1e10;
else
hinv0 = - 2.0 / sqrt(posvel[0].h2);
if(posvel[1].h2 == 0.0)
hinv1 = - 1e10;
else
hinv1 = - 2.0 / sqrt(posvel[1].h2);
/* 32-byte-aligned scratch area for i-particle state + accumulators */
ret = posix_memalign((void **)&iptr, 32, NVAR_IP * 32);
assert(ret == 0);
/* broadcast i-particle state; VMERGE packs the two particles' float
 * quantities into the low/high YMM halves */
VBROADCASTSD(posvel[0].xpos, YMM00);
VBROADCASTSD(posvel[0].ypos, YMM01);
VBROADCASTSD(posvel[0].zpos, YMM02);
VBROADCASTSD(posvel[1].xpos, YMM03);
VBROADCASTSD(posvel[1].ypos, YMM04);
VBROADCASTSD(posvel[1].zpos, YMM05);
VBROADCASTSS(posvel[0].xvel, XMM06);
VBROADCASTSS(posvel[1].xvel, XMM07);
VMERGE(YMM06, YMM07, YMM06);
VBROADCASTSS(posvel[0].yvel, XMM08);
VBROADCASTSS(posvel[1].yvel, XMM09);
VMERGE(YMM08, YMM09, YMM07);
VBROADCASTSS(posvel[0].zvel, XMM10);
VBROADCASTSS(posvel[1].zvel, XMM11);
VMERGE(YMM10, YMM11, YMM08);
VBROADCASTSS(posvel[0].id, XMM12);
VBROADCASTSS(posvel[1].id, XMM13);
VMERGE(YMM12, YMM13, YMM09);
VBROADCASTSS(posvel[0].eps2, XMM14);
VBROADCASTSS(posvel[1].eps2, XMM15);
VMERGE(YMM14, YMM15, YMM10);
VBROADCASTSS(hinv0, XMM11);
VBROADCASTSS(hinv1, XMM12);
VMERGE(YMM11, YMM12, YMM11);
VBROADCASTSS(ten, YMM12);
VBROADCASTSS(minusone, YMM13);
/* spill broadcast state to the aligned scratch block */
VSTORPD(YMM00, iptr->xpos0[0]);
VSTORPD(YMM01, iptr->ypos0[0]);
VSTORPD(YMM02, iptr->zpos0[0]);
VSTORPD(YMM03, iptr->xpos1[0]);
VSTORPD(YMM04, iptr->ypos1[0]);
VSTORPD(YMM05, iptr->zpos1[0]);
VSTORPS(YMM06, iptr->xvel01[0]);
VSTORPS(YMM07, iptr->yvel01[0]);
VSTORPS(YMM08, iptr->zvel01[0]);
VSTORPS(YMM09, iptr->id01[0]);
VSTORPS(YMM10, iptr->veps2[0]);
VSTORPS(YMM11, iptr->hinv[0]);
VSTORPS(YMM12, iptr->rmin2[0]);
VSTORPS(YMM13, iptr->in[0]);
/* clear accumulators (pot: YMM09, acc: YMM10-12, jerk: YMM13-15) */
VZEROALL;
for(j = 0, nbptr = nbptr0; j < nj; j += JPARA, jptr++, nbptr++){ // if nj % 2 != 0 ATARU
// dx -> YMM03
VLOADPD(jptr->xpos[0], YMM00);
VSUBPD_M(iptr->xpos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->xpos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM03);
// dy -> YMM04
VLOADPD(jptr->ypos[0], YMM00);
VSUBPD_M(iptr->ypos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->ypos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM04);
// dz -> YMM05
VLOADPD(jptr->zpos[0], YMM00);
VSUBPD_M(iptr->zpos0[0], YMM00, YMM01);
VCVTPD2PS(YMM01, XMM01);
VSUBPD_M(iptr->zpos1[0], YMM00, YMM02);
VCVTPD2PS(YMM02, XMM02);
VMERGE(YMM01, YMM02, YMM05);
// dr^2 (softened: accumulated on top of eps^2)
VLOADPS(iptr->veps2[0], YMM01);
VFMADDPS(YMM01, YMM03, YMM03);
VFMADDPS(YMM01, YMM04, YMM04);
VFMADDPS(YMM01, YMM05, YMM05);
// - 2 / r -> YMM01 (rsqrt estimate + refinement; see macro definitions)
VRSQRTPS(YMM01, YMM02);
VMULPS(YMM02, YMM01, YMM01);
VFMSUB213PS_M(three[0], YMM02, YMM01);
VMULPS(YMM02, YMM01, YMM01);
// mask: zero out lanes where the j-particle is one of the i-particles
VLOADPS(jptr->indx[0], YMM02);
VLOADPS(iptr->id01[0], YMM00);
VCMPNEQPS(YMM00, YMM02, YMM02);
VANDPS(YMM02, YMM01, YMM01);
// nearest neighbour (free: YMM00, YMM02, YMM06, YMM07, YMM08)
// keep the smallest -2/r per lane and carry the matching j index in in[]
VLOADPS(iptr->rmin2[0], YMM00);
VMINPS(YMM01, YMM00, YMM02);
VSTORPS(YMM02, iptr->rmin2[0]);
VCMPPS(YMM01, YMM00, YMM02, GT);
VLOADPS(jptr->indx[0], YMM06);
VANDPS(YMM02, YMM06, YMM07);
VCMPPS(YMM01, YMM00, YMM08, LE);
VANDPS_M(iptr->in[0], YMM08, YMM08);
VADDPS(YMM08, YMM07, YMM07);
VSTORPS(YMM07, iptr->in[0]);
// neighbour list: flag lanes with -2/r <= -2/h (i.e. r <= h)
VLOADPS(iptr->hinv[0], YMM00);
VCMPPS(YMM00, YMM01, YMM00, LE);
VLOADPS(flag[0], YMM02);
VANDPS(YMM02, YMM00, YMM00);
VSTORPS(YMM00, nbptr->flag[0]);
// potential: widen m*(-2/r) to double and fold into YMM09
VMULPS_M(jptr->mass[0], YMM01, YMM02);
VCVTPS2PD(XMM02, YMM00);
VUP2LOW(YMM02, XMM06);
VCVTPS2PD(XMM06, YMM06);
VHADDPD(YMM06, YMM00, YMM07);
VADDPD(YMM07, YMM09, YMM09);
// dvx, dvy, dvz (vj - vi)
VLOADPS(jptr->xvel[0], YMM06);
VSUBPS_M(iptr->xvel01[0], YMM06, YMM06);
VLOADPS(jptr->yvel[0], YMM07);
VSUBPS_M(iptr->yvel01[0], YMM07, YMM07);
VLOADPS(jptr->zvel[0], YMM08);
VSUBPS_M(iptr->zvel01[0], YMM08, YMM08);
// xv -> YMM00
VMULPS(YMM03, YMM06, YMM00);
VFMADDPS(YMM00, YMM04, YMM07);
VFMADDPS(YMM00, YMM05, YMM08);
// YMM00: 3.0 * xv / r^2, YMM02: - m / r^3
VMULPS_M(jptr->mass[0], YMM01, YMM02);
VMULPS(YMM01, YMM01, YMM01);
VMULPS(YMM01, YMM00, YMM00);
VMULPS(YMM01, YMM02, YMM02);
VMULPS_M(threefourth[0], YMM00, YMM00);
// prefetch the next j-particle's cache lines
PREFETCH((jptr+1)->xpos[0]);
PREFETCH((jptr+1)->zpos[0]);
PREFETCH((jptr+1)->mass[0]);
PREFETCH((jptr+1)->yvel[0]);
// jx1, jy1, jz1 (velocity-dependent jerk term)
VFMADDPS(YMM13, YMM02, YMM06);
VFMADDPS(YMM14, YMM02, YMM07);
VFMADDPS(YMM15, YMM02, YMM08);
// ax (widen to double, horizontal add, accumulate in YMM10)
VMULPS(YMM02, YMM03, YMM03);
VCVTPS2PD(XMM03, YMM06);
VUP2LOW(YMM03, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM10, YMM10);
// ay
VMULPS(YMM02, YMM04, YMM04);
VCVTPS2PD(XMM04, YMM06);
VUP2LOW(YMM04, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM11, YMM11);
// az
VMULPS(YMM02, YMM05, YMM05);
VCVTPS2PD(XMM05, YMM06);
VUP2LOW(YMM05, XMM07);
VCVTPS2PD(XMM07, YMM07);
VHADDPD(YMM07, YMM06, YMM06);
VADDPD(YMM06, YMM12, YMM12);
// jx2, jy2, jz2 (position-dependent jerk term)
VFNMADDPS(YMM13, YMM00, YMM03);
VFNMADDPS(YMM14, YMM00, YMM04);
VFNMADDPS(YMM15, YMM00, YMM05);
}
/* spill accumulators and reduce lanes in scalar code.
 * NOTE(review): no VZEROUPPER before the scalar epilogue here, unlike the
 * first kernel in this file - confirm whether that is intentional. */
VSTORPD(YMM09, iptr->pot[0]);
VSTORPD(YMM10, iptr->xacc[0]);
VSTORPD(YMM11, iptr->yacc[0]);
VSTORPD(YMM12, iptr->zacc[0]);
VSTORPS(YMM13, iptr->xjrk[0]);
VSTORPS(YMM14, iptr->yjrk[0]);
VSTORPS(YMM15, iptr->zjrk[0]);
/* i-particle 0: lanes {0,2} (double) resp. {0..3} (float) */
accjerk[0].xacc = iptr->xacc[0] + iptr->xacc[2];
accjerk[0].yacc = iptr->yacc[0] + iptr->yacc[2];
accjerk[0].zacc = iptr->zacc[0] + iptr->zacc[2];
accjerk[0].pot = iptr->pot[0] + iptr->pot[2];
accjerk[0].xjrk = iptr->xjrk[0] + iptr->xjrk[1] + iptr->xjrk[2] + iptr->xjrk[3];
accjerk[0].yjrk = iptr->yjrk[0] + iptr->yjrk[1] + iptr->yjrk[2] + iptr->yjrk[3];
accjerk[0].zjrk = iptr->zjrk[0] + iptr->zjrk[1] + iptr->zjrk[2] + iptr->zjrk[3];
/* nearest neighbour of i-particle 0: scan its JPARA lanes */
for(true_rmin2 = 1e30, j = 0; j < JPARA; j++){
if(iptr->rmin2[j] < true_rmin2){
true_rmin2 = iptr->rmin2[j];
accjerk[0].rnnb = - 2.0 / true_rmin2;
accjerk[0].nnb = (int)iptr->in[j];
}
}
/* i-particle 1: lanes {1,3} (double) resp. {4..7} (float) */
accjerk[1].xacc = iptr->xacc[1] + iptr->xacc[3];
accjerk[1].yacc = iptr->yacc[1] + iptr->yacc[3];
accjerk[1].zacc = iptr->zacc[1] + iptr->zacc[3];
accjerk[1].pot = iptr->pot[1] + iptr->pot[3];
accjerk[1].xjrk = iptr->xjrk[4] + iptr->xjrk[5] + iptr->xjrk[6] + iptr->xjrk[7];
accjerk[1].yjrk = iptr->yjrk[4] + iptr->yjrk[5] + iptr->yjrk[6] + iptr->yjrk[7];
accjerk[1].zjrk = iptr->zjrk[4] + iptr->zjrk[5] + iptr->zjrk[6] + iptr->zjrk[7];
for(true_rmin2 = 1e30, j = 4; j < 4 + JPARA; j++){
if(iptr->rmin2[j] < true_rmin2){
true_rmin2 = iptr->rmin2[j];
accjerk[1].rnnb = - 2.0 / true_rmin2;
accjerk[1].nnb = (int)iptr->in[j];
}
}
/* gather the flagged neighbours: lanes 0..3 belong to i-particle i,
 * lanes 4..7 to i-particle i+1 */
int jj;
int nn0, nn1;
for(nn0 = nn1 = 0, j = 0, jptr = pred_mem, nbptr = nbptr0; j < nj; j += JPARA, jptr++, nbptr++){
for(jj = 0; jj < JPARA; jj++)
if(nbptr->flag[jj] == 1.0){
nbl[i][nn0] = (int)jptr->indx[jj];
++nn0;
}
for(jj = 4; jj < JPARA + 4; jj++)
if(nbptr->flag[jj] == 1.0){
nbl[i+1][nn1] = (int)jptr->indx[jj];
++nn1;
}
}
/* overflow flag checked after the fact: the lists above may already have
 * been written past MAXLEN when this triggers */
if(nn0 > MAXLEN || nn1 > MAXLEN)
nblerror = 1;
nblen[i] = nn0;
nblen[i+1] = nn1;
free(iptr);
return;
}
|
functions.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "functions.h"
//compute a*b mod p safely
// Returns (a * b) mod p for any 32-bit operands. Precondition: p != 0.
//
// Fix: the previous repeated-doubling loop computed "ab + za" and "2 * za"
// in unsigned int, which wraps at 2^32 and therefore produced wrong
// results whenever p >= 2^31. Doing the product in 64 bits is exact
// (a, b < 2^32 => a*b < 2^64) and correct for every 32-bit modulus.
unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) {
  unsigned long long wide = (unsigned long long)a * (unsigned long long)b;
  return (unsigned int)(wide % p);
}
//compute a^b mod p safely
// Right-to-left binary exponentiation: walk the bits of b from least to
// most significant, squaring the running base each step and folding it
// into the result whenever the current bit is set. Every product goes
// through modprod() so intermediates stay reduced mod p.
unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int result = 1;
  for (unsigned int base = a; b != 0; b >>= 1) {
    if (b & 1u) result = modprod(result, base, p);
    base = modprod(base, base, p);
  }
  return result;
}
//returns either 0 or 1 randomly
// Draws one pseudo-random bit from the C library PRNG. rand() returns a
// non-negative int, so masking the low bit is equivalent to rand() % 2.
unsigned int randomBit() {
  return (unsigned int)(rand() & 1);
}
//returns a random integer which is between 2^{n-1} and 2^{n}
// Builds the value most-significant-bit first: start from the implicit
// leading 1 bit and shift in n-1 random bits, so the result always lies
// in [2^(n-1), 2^n). Assumes 1 <= n <= 32.
unsigned int randXbitInt(unsigned int n) {
  unsigned int value = 1;
  unsigned int bitsLeft = n - 1;
  while (bitsLeft-- > 0) {
    value = (value << 1) | randomBit();
  }
  return value;
}
//tests for primality and return 1 if N is probably prime and 0 if N is composite
// Strategy: trial division by the first 168 primes (this decides every
// N <= 997 exactly and rejects any composite with a factor <= 997), then
// a Miller-Rabin test using those same primes as witnesses.
// Note: by design this returns 0 for even N, including N == 2.
//
// Fixes over the previous version:
//  * N < 2: N == 1 used to hang in the "while (d%2 == 0)" loop because
//    d = N-1 = 0 stays even forever; tiny N are now rejected up front.
//  * the Miller-Rabin witness loop ran only r-2 squarings
//    (for (i=1; i<r-1; i++)) instead of the required r-1, which could
//    mislabel some primes as composite; the bound is now i < r.
unsigned int isProbablyPrime(unsigned int N) {
  if (N < 2) return 0;   //0 and 1 are not prime
  if (N%2==0) return 0; //not interested in even numbers (including 2)
  unsigned int NsmallPrimes = 168;
  unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
                                      37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
                                      79, 83, 89, 97, 101, 103, 107, 109, 113,
                                      127, 131, 137, 139, 149, 151, 157, 163,
                                      167, 173, 179, 181, 191, 193, 197, 199,
                                      211, 223, 227, 229, 233, 239, 241, 251,
                                      257, 263, 269, 271, 277, 281, 283, 293,
                                      307, 311, 313, 317, 331, 337, 347, 349,
                                      353, 359, 367, 373, 379, 383, 389, 397,
                                      401, 409, 419, 421, 431, 433, 439, 443,
                                      449, 457, 461, 463, 467, 479, 487, 491,
                                      499, 503, 509, 521, 523, 541, 547, 557,
                                      563, 569, 571, 577, 587, 593, 599, 601,
                                      607, 613, 617, 619, 631, 641, 643, 647,
                                      653, 659, 661, 673, 677, 683, 691, 701,
                                      709, 719, 727, 733, 739, 743, 751, 757,
                                      761, 769, 773, 787, 797, 809, 811, 821,
                                      823, 827, 829, 839, 853, 857, 859, 863,
                                      877, 881, 883, 887, 907, 911, 919, 929,
                                      937, 941, 947, 953, 967, 971, 977, 983,
                                      991, 997};
  //before using a probabilistic primality check, check directly using the small primes list
  //(index 0, the prime 2, is already covered by the even-N rejection above)
  for (unsigned int n=1;n<NsmallPrimes;n++) {
    if (N==smallPrimeList[n]) return 1; //true
    if (N%smallPrimeList[n]==0) return 0; //false
  }
  //if we're testing a large number switch to Miller-Rabin primality test
  //write N-1 = d * 2^r with d odd
  unsigned int r = 0;
  unsigned int d = N-1;
  while (d%2 == 0) {
    d /= 2;
    r += 1;
  }
  for (unsigned int n=0;n<NsmallPrimes;n++) {
    unsigned int k = smallPrimeList[n];
    unsigned int x = modExp(k,d,N);
    if ((x==1) || (x==N-1)) continue;
    //square x up to r-1 times, looking for N-1
    for (unsigned int i=1;i<r;i++) {
      x = modprod(x,x,N);
      if (x == 1) return 0; //nontrivial square root of 1 => composite
      if (x == N-1) break;
    }
    // see whether we left the loop because x==N-1
    if (x == N-1) continue;
    return 0; //false
  }
  return 1; //true
}
//Finds a generator of Z_p using the assumption that p=2*q+1
// For a safe prime p the possible element orders divide p-1 = 2q, so they
// are 1, 2, q or 2q. Any g with g != 0, g^q != 1 and g^2 != 1 therefore
// has order 2q, i.e. generates the whole multiplicative group.
unsigned int findGenerator(unsigned int p) {
  unsigned int q = (p-1)/2;
  unsigned int g;
  for (;;) {
    //make a random number 1<= g < p
    g = randXbitInt(32)%p; //could also have passed n to findGenerator
    if (g != 0 && modExp(g,q,p) != 1 && modExp(g,2,p) != 1) return g;
  }
}
// Generates and prints a complete ElGamal key set.
//   n : bit length used for the safe-prime search and the secret-key draw
//   p : out - an n-bit safe prime (p = 2q+1 with q also prime)
//   g : out - a generator of Z_p*
//   h : out - public key, h = g^x mod p
//   x : out - secret key
void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g,
                  unsigned int *h, unsigned int *x) {
  /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number
     which satisfies p=2*q+1 where q is also prime */
  for (;;) {
    *p = randXbitInt(n);
    unsigned int q = (*p-1)/2;
    if (isProbablyPrime(*p) && isProbablyPrime(q)) break;
  }
  /* Use the fact that p=2*q+1 to quickly find a generator */
  *g = findGenerator(*p);
  //pick a secret key, x
  *x = randXbitInt(n)%(*p);
  //compute h
  *h = modExp(*g,*x,*p);
  printf("ElGamal Setup successful.\n");
  printf("p = %u. \n", *p);
  printf("g = %u is a generator of Z_%u \n", *g, *p);
  printf("Secret key: x = %u \n", *x);
  printf("h = g^x = %u\n", *h);
  printf("\n");
}
// Encrypts Nints messages in place: m[i] <- m[i] * h^y mod p, with the
// ephemeral value a[i] = g^y published alongside. A fresh nonzero
// exponent y is drawn for every message.
// NOTE(review): randXbitInt() ultimately calls rand(), which is not
// guaranteed thread-safe, so the parallel loop below may race inside the
// C library PRNG - confirm whether that is acceptable here or whether
// rand_r() with per-thread seeds was intended.
void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int g, unsigned int h) {
  /* Q2.1 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    //pick y in Z_p randomly
    unsigned int y = 0;
    while (y == 0) {          //dont allow y=0
      y = randXbitInt(32) % p;
    }
    //compute a = g^y
    a[i] = modExp(g, y, p);
    //compute s = h^y
    unsigned int s = modExp(h, y, p);
    //encrypt m by multiplying with s
    m[i] = modprod(m[i], s, p);
  }
}
// Decrypts Nints ciphertexts in place: m[i] <- m[i] * (a[i]^x)^(p-2) mod p.
// The modular inverse of the shared secret s = a^x is obtained through
// Fermat's little theorem: s^(p-2) = s^(-1) mod p for prime p.
void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int x) {
  /* Q2.1 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    //compute s = a^x
    unsigned int sharedSecret = modExp(a[i], x, p);
    //compute s^{-1} = s^{p-2}
    unsigned int inverse = modExp(sharedSecret, p - 2, p);
    //decrypt message by multiplying by the inverse
    m[i] = modprod(m[i], inverse, p);
  }
}
//Pad the end of string so its length is divisible by Nchars
// Assume there is enough allocated storage for the padded string
// Appends ' ' characters until strlen(string) % charsPerInt == 0, keeping
// the buffer NUL-terminated at every step.
//
// Fixes over the previous version:
//  * it overwrote the terminator with ' ' without writing a new '\0', so
//    the next strlen() call read past the end of the string (undefined
//    behavior / garbage padding);
//  * charsPerInt == 0 caused a modulo-by-zero; it is now treated as
//    "no padding required".
void padString(unsigned char* string, unsigned int charsPerInt)
{
  if (charsPerInt == 0) return;  //avoid modulo-by-zero on degenerate input
  unsigned int len = (unsigned int)strlen((const char *)string);
  while (len % charsPerInt != 0) {
    string[len++] = ' ';  //pad with a space...
    string[len] = '\0';   //...and immediately restore the terminator
  }
}
// Packs the Nchars characters of string into Nints unsigned ints,
// cpi = Nchars/Nints characters per integer, little-endian within each
// word (string[i] lands in the low byte of Z[i/cpi]).
//
// Fix: the previous version XOR-accumulated into Z[] ("Z[..] ^= ..."),
// which silently required the caller to have zeroed Z beforehand; each
// word is now built in a local accumulator and assigned, so the output
// no longer depends on the prior contents of Z.
void convertStringToZ(unsigned char *string, unsigned int Nchars,
                      unsigned int *Z, unsigned int Nints)
{
  /* Q1.3 Complete this function */
  unsigned int cpi = Nchars/Nints;  //characters packed per integer
  /* Q2.2 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nchars; i += cpi)
  {
    unsigned int packed = 0;
    for (unsigned int j = 0; j < cpi; j++)
    {
      //shift character j into byte position j of the word
      packed |= ((unsigned int)string[i + j]) << (j * 8);
    }
    Z[i / cpi] = packed;
  }
}
// Inverse of convertStringToZ: unpacks each unsigned int in Z back into
// cpi = Nchars/Nints characters, low byte first, writing Nchars bytes
// into string (no NUL terminator is appended).
void convertZToString(unsigned int *Z, unsigned int Nints,
                      unsigned char *string, unsigned int Nchars)
{
  /* Q1.4 Complete this function */
  unsigned int cpi = Nchars/Nints;  //characters stored per integer
  /* Q2.2 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nchars; i += cpi)
  {
    unsigned int word = Z[i / cpi];
    for (unsigned int j = 0; j < cpi; j++)
    {
      //byte j of the word becomes character i+j
      string[i + j] = (unsigned char)((word >> (j * 8)) & 0xFFu);
    }
  }
}
|
hello_world.c | // compile with gcc -fopenmp hello_world.c
# include <stdio.h>
# include <omp.h>
// Spawns an OpenMP thread team; every thread prints a "hello" and a
// "world" line tagged with its own thread id. Output interleaving across
// threads is nondeterministic.
int main()
{
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    printf("hello(%d)", tid);
    printf("world(%d) \n", tid);
  }
}
|
GB_unop__asin_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asin_fp64_fp64)
// op(A') function: GB (_unop_tran__asin_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = asin (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asin (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = asin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise apply: Cx [p] = asin (Ax [p]) for p = 0..anz-1, parallelized
// with a static OpenMP schedule over nthreads threads. When Ab is non-NULL
// the matrix is stored in bitmap form and entries with Ab [p] == 0 are
// skipped (the caller has already copied A->b into C->b).
// Auto-generated file: keep the logic in sync with Generator/*; do not
// hand-edit the loop bodies.
GrB_Info GB (_unop_apply__asin_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense / full case: every entry is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = asin (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = asin (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Fused transpose + apply: C = asin (A'), with the shared transpose
// template (GB_unop_transpose.c) driving the iteration; this wrapper only
// binds the type/operator macros defined at the top of this file.
// Auto-generated file: do not hand-edit.
GrB_Info GB (_unop_tran__asin_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
attention.c | #include "darknet.h"
#ifdef WIN32
#include <time.h>
#else
#include <sys/time.h>
#endif
#include <assert.h>
#define class temp
// Widens the truth matrix d->y by n extra columns, filling every new cell
// with val. Rows are grown in place with realloc; d->y.cols is bumped only
// after the loop so the old width can serve as the offset of the first
// new column.
// NOTE(review): the realloc result is assigned straight back without a
// NULL check, matching the surrounding darknet style - on allocation
// failure the row would be lost; confirm that is acceptable.
void extend_data_truth(data *d, int n, float val)
{
    int row, col;
    for (row = 0; row < d->y.rows; ++row) {
        float *grown = (float*)realloc(d->y.vals[row], (d->y.cols + n) * sizeof(float));
        d->y.vals[row] = grown;
        for (col = 0; col < n; ++col) {
            grown[d->y.cols + col] = val;
        }
    }
    d->y.cols += n;
}
// Runs the network forward over every batch of `test` (no training) and
// returns a (test.X.rows x 1) matrix holding, per sample, the negated sum
// of the final layer's outputs as a loss/score proxy.
// The net's input/truth/train/delta fields are temporarily redirected and
// restored afterwards via a whole-struct copy.
// NOTE(review): calloc results are used unchecked, and `t` is computed but
// only consumed by the commented-out alternative scoring line below -
// presumably kept for easy switching; confirm before removing.
matrix network_loss_data(network *net, data test)
{
int i,b;
int k = 1;
matrix pred = make_matrix(test.X.rows, k);
// staging buffers for one batch of inputs and truths
float *X = (float*)calloc(net->batch*test.X.cols, sizeof(float));
float *y = (float*)calloc(net->batch*test.y.cols, sizeof(float));
for(i = 0; i < test.X.rows; i += net->batch){
// copy up to net->batch samples into the staging buffers
for(b = 0; b < net->batch; ++b){
if(i+b == test.X.rows) break;
memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
memcpy(y+b*test.y.cols, test.y.vals[i+b], test.y.cols*sizeof(float));
}
// snapshot the net, run inference-mode forward, then restore it
network orig = *net;
net->input = X;
net->truth = y;
net->train = 0;
net->delta = 0;
forward_network(net);
*net = orig;
float *delta = net->layers[net->n-1].output;
for(b = 0; b < net->batch; ++b){
if(i+b == test.X.rows) break;
int t = max_index(y + b*test.y.cols, 1000);
float err = sum_array(delta + b*net->outputs, net->outputs);
pred.vals[i+b][0] = -err;
//pred.vals[i+b][0] = 1-delta[b*net->outputs + t];
}
}
free(X);
free(y);
return pred;
}
void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
int i, j;
float avg_cls_loss = -1;
float avg_att_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
network **nets = (network**)calloc(ngpus, sizeof(network*));
srand(time(0));
int seed = rand();
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
if(gpu_index >= 0){
opencl_set_device(gpus[i]);
}
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *train_list = option_find_str(options, "train", "data/train.list");
int classes = option_find_int(options, "classes", 2);
char **labels = get_labels(label_list);
list *plist = get_paths(train_list);
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
double time;
int divs=3;
int size=2;
load_args args = {0};
args.w = divs*net->w/size;
args.h = divs*net->h/size;
args.size = divs*net->w/size;
args.threads = 32;
args.hierarchy = net->hierarchy;
args.min = net->min_ratio*args.w;
args.max = net->max_ratio*args.w;
args.angle = net->angle;
args.aspect = net->aspect;
args.exposure = net->exposure;
args.saturation = net->saturation;
args.hue = net->hue;
args.paths = paths;
args.classes = classes;
args.n = imgs;
args.m = N;
args.labels = labels;
args.type = CLASSIFICATION_DATA;
data train;
data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
int epoch = (*net->seen)/N;
while(get_current_batch(net) < net->max_batches || net->max_batches == 0){
time = what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
data resized = resize_data(train, net->w, net->h);
extend_data_truth(&resized, divs*divs, 0);
data *tiles = tile_data(train, divs, size);
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time = what_time_is_it_now();
float aloss = 0;
float closs = 0;
int z;
for (i = 0; i < divs*divs/ngpus; ++i) {
#pragma omp parallel for
for(j = 0; j < ngpus; ++j){
int index = i*ngpus + j;
extend_data_truth(tiles+index, divs*divs, SECRET_NUM);
matrix deltas = network_loss_data(nets[j], tiles[index]);
for(z = 0; z < resized.y.rows; ++z){
resized.y.vals[z][train.y.cols + index] = deltas.vals[z][0];
}
free_matrix(deltas);
}
}
int *inds = (int*)calloc(resized.y.rows, sizeof(int));
for(z = 0; z < resized.y.rows; ++z){
int index = max_index(resized.y.vals[z] + train.y.cols, divs*divs);
inds[z] = index;
for(i = 0; i < divs*divs; ++i){
resized.y.vals[z][train.y.cols + i] = (i == index)? 1 : 0;
}
}
data best = select_data(tiles, inds);
free(inds);
#ifdef GPU
if(gpu_index >= 0) {
if (ngpus == 1) {
closs = train_network(net, train);
} else {
closs = train_networks(nets, ngpus, train, 4);
}
}
else {
closs = train_network(net, train);
}
#else
closs = train_network(net, train);
#endif
for (i = 0; i < divs*divs; ++i) {
printf("%.2f ", resized.y.vals[0][train.y.cols + i]);
if((i+1)%divs == 0) printf("\n");
free_data(tiles[i]);
}
free_data(best);
printf("\n");
image im = float_to_image(64,64,3,resized.X.vals[0]);
//show_image(im, "orig");
//cvWaitKey(100);
/*
image im1 = float_to_image(64,64,3,tiles[i].X.vals[0]);
image im2 = float_to_image(64,64,3,resized.X.vals[0]);
show_image(im1, "tile");
show_image(im2, "res");
*/
#ifdef GPU
if(gpu_index >= 0) {
if (ngpus == 1) {
aloss = train_network(net, train);
} else {
aloss = train_networks(nets, ngpus, train, 4);
}
}
else {
aloss = train_network(net, train);
}
#else
aloss = train_network(net, train);
#endif
for(i = 0; i < divs*divs; ++i){
printf("%f ", nets[0]->output[1000 + i]);
if ((i+1) % divs == 0) printf("\n");
}
printf("\n");
free_data(resized);
free_data(train);
if(avg_cls_loss == -1) avg_cls_loss = closs;
if(avg_att_loss == -1) avg_att_loss = aloss;
avg_cls_loss = avg_cls_loss*.9 + closs*.1;
avg_att_loss = avg_att_loss*.9 + aloss*.1;
printf("%ld, %.3f: Att: %f, %f avg, Class: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net->seen)/N, aloss, avg_att_loss, closs, avg_cls_loss, get_current_rate(net), what_time_is_it_now()-time, *net->seen);
if(*net->seen/N > epoch){
epoch = *net->seen/N;
char buff[256];
sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch);
save_weights(net, buff);
}
if(get_current_batch(net)%1000 == 0){
char buff[256];
sprintf(buff, "%s/%s.backup",backup_directory,base);
save_weights(net, buff);
}
}
char buff[256];
sprintf(buff, "%s/%s.weights", backup_directory, base);
save_weights(net, buff);
pthread_join(load_thread, 0);
free_network(net);
free_ptrs((void**)labels, classes);
free_ptrs((void**)paths, plist->size);
free_list(plist);
free(base);
}
/* Single-crop validation for an attention classifier.
 For every image in the "valid" list: load it, center-crop at divs/size times
 the network resolution, predict on the downscaled crop, print the divs x divs
 attention map, and report running top-1 / top-k accuracy.
 Fix: the calloc'd scratch buffers (indexes, avgs, inds) and the network were
 never released; they are now freed before returning. */
void validate_attention_single(char *datacfg, char *filename, char *weightfile)
{
int i, j;
network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *leaf_list = option_find_str(options, "leaves", 0);
if(leaf_list) change_leaves(net->hierarchy, leaf_list);
char *valid_list = option_find_str(options, "valid", "data/train.list");
int classes = option_find_int(options, "classes", 2);
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
list *plist = get_paths(valid_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
free_list(plist);
float avg_acc = 0;
float avg_topk = 0;
int *indexes = (int*)calloc(topk, sizeof(int));
int divs = 4;
int size = 2;
int extra = 0; // extra-tile refinement disabled (loop below never runs)
float *avgs = (float*)calloc(classes, sizeof(float));
int *inds = (int*)calloc(divs*divs, sizeof(int));
for(i = 0; i < m; ++i){
// Ground-truth class: first label whose name appears in the path
int class = -1;
char *path = paths[i];
for(j = 0; j < classes; ++j){
if(strstr(path, labels[j])){
class = j;
break;
}
}
image im = load_image_color(paths[i], 0, 0);
image resized = resize_min(im, net->w*divs/size);
image crop = crop_image(resized, (resized.w - net->w*divs/size)/2, (resized.h - net->h*divs/size)/2, net->w*divs/size, net->h*divs/size);
image rcrop = resize_image(crop, net->w, net->h);
//show_image(im, "orig");
//show_image(crop, "cropped");
//cvWaitKey(0);
float *pred = network_predict(net, rcrop.data);
//pred[classes + 56] = 0;
// Print the divs x divs attention map stored after the class scores
for(j = 0; j < divs*divs; ++j){
printf("%.2f ", pred[classes + j]);
if((j+1)%divs == 0) printf("\n");
}
printf("\n");
// NOTE(review): avgs is filled here but never read for the final top_k
// below — presumably left over from the extra-tile path; verify intent.
copy_cpu(classes, pred, 1, avgs, 1);
top_k(pred + classes, divs*divs, divs*divs, inds);
show_image(crop, "crop", 0);
for(j = 0; j < extra; ++j){
// Re-run the net on the highest-attention tiles at full resolution
int index = inds[j];
int row = index / divs;
int col = index % divs;
int y = row * crop.h / divs - (net->h - crop.h/divs)/2;
int x = col * crop.w / divs - (net->w - crop.w/divs)/2;
printf("%d %d %d %d\n", row, col, y, x);
image tile = crop_image(crop, x, y, net->w, net->h);
float *pred = network_predict(net, tile.data);
axpy_cpu(classes, 1., pred, 1, avgs, 1);
show_image(tile, "tile", 10);
}
if(net->hierarchy) hierarchy_predictions(pred, net->outputs, net->hierarchy, 1, 1);
if(rcrop.data != resized.data) free_image(rcrop);
if(resized.data != im.data) free_image(resized);
free_image(im);
free_image(crop);
top_k(pred, classes, topk, indexes);
if(indexes[0] == class) avg_acc += 1;
for(j = 0; j < topk; ++j){
if(indexes[j] == class) avg_topk += 1;
}
printf("%d: top 1: %f, top %d: %f\n", i, avg_acc/(i+1), topk, avg_topk/(i+1));
}
// Release scratch buffers and the network (previously leaked)
free(indexes);
free(avgs);
free(inds);
free_network(net);
}
/* Multi-scale validation: for each image, sum predictions over several input
 scales (original + horizontally flipped at each scale) and report running
 top-1 / top-k accuracy.
 Fix: the indexes buffer and the network were never released; freed now. */
void validate_attention_multi(char *datacfg, char *filename, char *weightfile)
{
int i, j;
network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
int classes = option_find_int(options, "classes", 2);
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
list *plist = get_paths(valid_list);
int scales[] = {224, 288, 320, 352, 384};
int nscales = sizeof(scales)/sizeof(scales[0]);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
free_list(plist);
float avg_acc = 0;
float avg_topk = 0;
int *indexes = (int*)calloc(topk, sizeof(int));
for(i = 0; i < m; ++i){
// Ground-truth class: first label whose name appears in the path
int class = -1;
char *path = paths[i];
for(j = 0; j < classes; ++j){
if(strstr(path, labels[j])){
class = j;
break;
}
}
// Accumulate class scores over all scales and flips
float *pred = (float*)calloc(classes, sizeof(float));
image im = load_image_color(paths[i], 0, 0);
for(j = 0; j < nscales; ++j){
image r = resize_min(im, scales[j]);
resize_network(net, r.w, r.h);
float *p = network_predict(net, r.data);
if(net->hierarchy) hierarchy_predictions(p, net->outputs, net->hierarchy, 1 , 1);
axpy_cpu(classes, 1, p, 1, pred, 1);
flip_image(r);
p = network_predict(net, r.data);
axpy_cpu(classes, 1, p, 1, pred, 1);
if(r.data != im.data) free_image(r);
}
free_image(im);
top_k(pred, classes, topk, indexes);
free(pred);
if(indexes[0] == class) avg_acc += 1;
for(j = 0; j < topk; ++j){
if(indexes[j] == class) avg_topk += 1;
}
printf("%d: top 1: %f, top %d: %f\n", i, avg_acc/(i+1), topk, avg_topk/(i+1));
}
// Release scratch buffer and the network (previously leaked)
free(indexes);
free_network(net);
}
/* Interactive/one-shot prediction: classify an image (given on the command
 line, or read repeatedly from stdin) and print the top scoring labels.
 Fix: strncpy(dst, src, 256) does not NUL-terminate when src fills the
 buffer, which made the later file-open read past buff; the copy is now
 capped at 255 bytes and explicitly terminated. The indexes buffer is also
 freed on every exit path. */
void predict_attention(char *datacfg, char *cfgfile, char *weightfile, char *filename, int top)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", 0);
if(!name_list) name_list = option_find_str(options, "labels", "data/labels.list");
if(top == 0) top = option_find_int(options, "top", 1);
int i = 0;
char **names = get_labels(name_list);
clock_t time;
int *indexes = (int*)calloc(top, sizeof(int));
char buff[256];
char *input = buff;
while(1){
if(filename){
// Copy at most 255 bytes and force termination: strncpy alone
// leaves the buffer unterminated when the source fills it.
strncpy(input, filename, 255);
input[255] = '\0';
}else{
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input){
free(indexes);
return;
}
strtok(input, "\n");
}
image im = load_image_color(input, 0, 0);
int resize = im.w != net->w || im.h != net->h;
image r = resize ? letterbox_image(im, net->w, net->h) : im;
//resize_network(&net, r.w, r.h);
//printf("%d %d\n", r.w, r.h);
float *X = r.data;
time=clock();
float *predictions = network_predict(net, X);
if(net->hierarchy) hierarchy_predictions(predictions, net->outputs, net->hierarchy, 1, 1);
top_k(predictions, net->outputs, top, indexes);
fprintf(stderr, "%s: Predicted in %f seconds.\n", input, sec(clock()-time));
for(i = 0; i < top; ++i){
int index = indexes[i];
//if(net->hierarchy) printf("%d, %s: %f, parent: %s \n",index, names[index], predictions[index], (net->hierarchy->parent[index] >= 0) ? names[net->hierarchy->parent[index]] : "Root");
//else printf("%s: %f\n",names[index], predictions[index]);
printf("%5.2f%%: %s\n", predictions[index]*100, names[index]);
}
if(r.data != im.data) free_image(r);
free_image(im);
if (filename) break;
}
free(indexes);
}
/* Command-line dispatcher for the attention subcommands
 (predict / train / valid / validmulti).
 Fix: argv[3] (data cfg) and argv[4] (network cfg) are read unconditionally
 below, so at least 5 arguments are required — the old `argc < 4` guard let
 argc == 4 through with a NULL cfg. The unused layer_s local was removed. */
void run_attention(int argc, char **argv)
{
if(argc < 5){
fprintf(stderr, "usage: %s %s [train/test/valid] [data] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
int ngpus;
int *gpus = read_intlist(gpu_list, &ngpus, gpu_index);
int top = find_int_arg(argc, argv, "-t", 0);
int clear = find_arg(argc, argv, "-clear");
char *data = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "predict")) predict_attention(data, cfg, weights, filename, top);
else if(0==strcmp(argv[2], "train")) train_attention(data, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_attention_single(data, cfg, weights);
else if(0==strcmp(argv[2], "validmulti")) validate_attention_multi(data, cfg, weights);
}
#undef class |
example_helper.c | #include <stdio.h>
#include <stdlib.h>
#include <limits.h>
/** Initialize a 2D array with some progressive values
@param width is the size of the array in the second dimension
@param height is the size of the array in the first dimension
@param[out] array is the array to initialize
Note that we could also use this [out] Doxygen information to avoid
specifying it again in the #pragma...
*/
/* Fill a 2D array with a deterministic striped test pattern.
 @param width  size of the array in the second dimension
 @param height size of the array in the first dimension
 @param[out] array the array to initialize
 Each cell gets (row + 3*col) shifted right by (row - col) & 7,
 so the shift amount is always in 0..7. */
void init_image(int width, int height, int array[height][width]) {
  // Rows are independent, so the fill can run in parallel
#pragma omp parallel for
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      int shift = (row - col) & 7;          // wraps into 0..7
      array[row][col] = (row + 3 * col) >> shift;
    }
  }
}
/** Write the content of an array to Portable Gray Map image format (PGM)
@param[in] filename is the name of the file to write into the image
@param width is the size of the array in the second dimension
@param height is the size of the array in the first dimension
@param[in] array is the array to use as image content. Note we could
infer the [in] information and communication directions directly from
"const" qualifier
*/
/** Write the content of an array to Portable Gray Map image format (PGM).
 @param[in] filename is the name of the file to write into the image
 @param width is the size of the array in the second dimension
 @param height is the size of the array in the first dimension
 @param[in] array is the array to use as image content
 Exits the process on I/O failure.
 Fix: open the file in binary mode ("wb") — P5 carries raw pixel bytes, and
 text-mode newline translation (e.g. on Windows) would corrupt the stream. */
void write_pgm_image(const char filename[], int width, int height,
                     const unsigned char array[height][width]) {
  FILE * fp;
  char * comments = "# This is an image generated by the " __FILE__
    " program.\n"
    "# SMECY ARTEMIS European project.\n";
  // Open the image file for writing (binary mode: P5 carries raw bytes):
  if ((fp = fopen(filename, "wb")) == NULL) {
    perror("Error opening file");
    exit(EXIT_FAILURE);
  }
  /* Write the PGM header which begins with, in ASCII decimal, the
     width, the height and the maximum gray value (255 here): */
  fprintf(fp,"P5\n%d %d\n%s%d\n", width, height, comments, UCHAR_MAX);
  for(int i = 0; i < height; i++)
    for(int j = 0; j < width; j++)
      // Write a pixel value:
      fputc(array[i][j], fp);
  // Close the file:
  fclose(fp);
}
/** Normalize an array of integer values into an array of unsigned char
This is typically used to generate a gray image from arbitrary data.
*/
/** Normalize an array of integer values into an array of unsigned char.
 This is typically used to generate a gray image from arbitrary data.
 Fixes: (1) the maximum was tracked with "else if", so an element that
 updated the running minimum could never also update the maximum — e.g. for
 {5, 1} the maximum stayed at INT_MIN and the scale factor was garbage;
 (2) a constant (or empty) input now maps to all zeros instead of dividing
 by zero. */
void normalize_to_char(int width, int height, int array[height][width],
                       unsigned char output[height][width]) {
  /* First find the minimum and maximum values of array for
     later normalization: */
  // Initialize the minimum value to the biggest integer:
  int minimum = INT_MAX;
  // Initialize the maximum value to the smallest integer:
  int maximum = INT_MIN;
  // min & max from next OpenMP 3.1 not yet implemented in gcc 4.6 :-(
  //#pragma omp parallel for reduction(min:minimum) reduction(max:maximum)
  for(int i = 0; i < height; i++)
    for(int j = 0; j < width; j++) {
      int v = array[i][j];
      // Independent tests: one element can be both the new min and new max
      if (v < minimum) minimum = v;
      if (v > maximum) maximum = v;
    }
  if (maximum <= minimum) {
    // Constant or empty input: no dynamic range, map everything to 0
    for(int i = 0; i < height; i++)
      for(int j = 0; j < width; j++)
        output[i][j] = 0;
    return;
  }
  // Now do the normalization
  float f = UCHAR_MAX/(float)(maximum - minimum);
#pragma omp parallel for
  for(int i = 0; i < height; i++)
    for(int j = 0; j < width; j++)
      output[i][j] = (array[i][j] - minimum)*f;
}
/* Apply a vertical symmetry to a subsquare in an image
*/
/* Apply a vertical symmetry (top/bottom flip) to a square sub-region of an
 image, in place.
 @param width  size of the image in the second dimension
 @param height size of the image in the first dimension
 @param image  the image to modify in place
 @param square_size edge length of the square to flip
 @param x_offset, y_offset top-left corner of the square
 Fix: the mirror row is y_offset + square_size - 1 - i; the previous
 "square_size - i" form was off by one and touched the row just below the
 square (out of bounds when the square reaches the bottom of the image). */
void square_symmetry(int width, int height, int image[height][width],
                     int square_size, int x_offset, int y_offset) {
  // Row pairs are independent, so the flip can be executed in parallel
#pragma omp parallel for
  for(int i = 0; i < square_size/2; i++)
    for(int j = 0; j < square_size; j++) {
      // Swap row i with its mirror row inside the square
      int tmp = image[y_offset + i][x_offset + j];
      image[y_offset + i][x_offset + j] =
        image[y_offset + square_size - 1 - i][x_offset + j];
      image[y_offset + square_size - 1 - i][x_offset + j] = tmp;
    }
}
|
GB_binop__ge_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint32)
// A*D function (colscale): GB (_AxD__ge_uint32)
// D*A function (rowscale): GB (_DxB__ge_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint32)
// C=scalar+B GB (_bind1st__ge_uint32)
// C=scalar+B' GB (_bind1st_tran__ge_uint32)
// C=A+scalar GB (_bind2nd__ge_uint32)
// C=A'+scalar GB (_bind2nd_tran__ge_uint32)
// C type: bool
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT32 || GxB_NO_GE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, specialized for z = (x >= y) on
// uint32_t; the loop lives in the shared template. Auto-generated — edit the
// Generator/ source, not this file.
void GB (_Cdense_ewise3_noaccum__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse B accumulated into dense C). The template body is compiled
// out (#if 0) for this operator, so this is a no-op that reports success.
GrB_Info GB (_Cdense_accumB__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into dense C). The template body is compiled
// out (#if 0) for this operator, so this is a no-op that reports success.
GrB_Info GB (_Cdense_accumb__ge_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D, specialized for GE_UINT32.
// Cx is the bool output array; the loop lives in the colscale template.
GrB_Info GB (_AxD__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D, specialized for GE_UINT32.
// Cx is the bool output array; the loop lives in the rowscale template.
GrB_Info GB (_DxB__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked), specialized for GE_UINT32.
// NOTE(review): alpha_scalar/beta_scalar are only initialized when
// is_eWiseUnion — presumably the template reads them only in that case.
GrB_Info GB (_AaddB__ge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B where C is sparse/hyper (method 08), specialized for
// GE_UINT32; the loop lives in the included meta template.
GrB_Info GB (_AemultB_08__ge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B, A sparse/hyper, B bitmap/full (method 02).
// GB_BINOP_FLIP is 0 for this operator (defined above), so only the
// unflipped branch below is compiled.
GrB_Info GB (_AemultB_02__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B, M sparse/hyper, A and B bitmap/full (method 04),
// specialized for GE_UINT32; the loop lives in the included template.
GrB_Info GB (_AemultB_04__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where C is bitmap, specialized for
// GE_UINT32; the loop lives in the included template.
GrB_Info GB (_AemultB_bitmap__ge_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present in B: scalar bound as the
// first operand. Bb is B's bitmap (GBB skips absent entries); the `false`
// passed to GBX is the iso flag (B is treated as non-iso here).
GrB_Info GB (_bind1st__ge_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in B's bitmap
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present in A: scalar bound as the
// second operand. Ab is A's bitmap (GBB skips absent entries); the `false`
// passed to GBX is the iso flag (A is treated as non-iso here).
GrB_Info GB (_bind2nd__ge_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in A's bitmap
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x >= aij) via the GB_CAST_OP
// macro defined just above. GB_ATYPE is redefined because the transpose
// template treats A as the operator's second input.
GrB_Info GB (_bind1st_tran__ge_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generated artifact: same value is re-defined)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij >= y) via the GB_CAST_OP
// macro defined just above.
GrB_Info GB (_bind2nd_tran__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ne_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_int16
// A.*B function (eWiseMult): GB_AemultB__ne_int16
// A*D function (colscale): GB_AxD__ne_int16
// D*A function (rowscale): GB_DxB__ne_int16
// C+=B function (dense accum): GB_Cdense_accumB__ne_int16
// C+=b function (dense accum): GB_Cdense_accumb__ne_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_int16
// C=scalar+B GB_bind1st__ne_int16
// C=scalar+B' GB_bind1st_tran__ne_int16
// C=A+scalar GB_bind2nd__ne_int16
// C=A'+scalar GB_bind2nd_tran__ne_int16
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT16 || GxB_NO_NE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, specialized for z = (x != y) on
// int16_t; the loop lives in the shared template. Auto-generated — edit the
// Generator/ source, not this file.
GrB_Info GB_Cdense_ewise3_noaccum__ne_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse B accumulated into dense C). The template body is compiled
// out (#if 0) for this operator, so this is a no-op that reports success.
GrB_Info GB_Cdense_accumB__ne_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into dense C). The template body is compiled
// out (#if 0) for this operator, so this is a no-op that reports success.
GrB_Info GB_Cdense_accumb__ne_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D, specialized for NE_INT16.
// Cx is the bool output array; the loop lives in the colscale meta file.
GrB_Info GB_AxD__ne_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by a diagonal matrix D, using cij = (dij != bij).
// Cx is bool (NE result type); the work is in the rowscale template.
GrB_Info GB_DxB__ne_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the NE_INT16 operator applied to
// entries present in both A and B.  All numeric phases are generated by
// the included GB_add_template.c.
GrB_Info GB_AaddB__ne_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with cij = (aij != bij) on the
// intersection of the patterns of A and B; the numeric phase comes from
// the included GB_emult_template.c.
GrB_Info GB_AemultB__ne_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the NE_INT16 operator with the scalar x bound as
// the first operand, so Cx [p] = (x != Bx [p]) for all p in 0..anz-1.
// Cx and Bx may be aliased; the loop is parallelized with OpenMP.
GrB_Info GB_bind1st__ne_int16
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bval = Bx [p] ;
        Cx [p] = (x != bval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the NE_INT16 operator with the scalar y bound as
// the second operand, so Cx [p] = (Ax [p] != y) for all p in 0..anz-1.
// Cx and Ax may be aliased; the loop is parallelized with OpenMP.
GrB_Info GB_bind2nd__ne_int16
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((int16_t *) y_input)) ;
    int16_t *Ax = (int16_t *) Ax_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aval = Ax [p] ;
        Cx [p] = (aval != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the NE operator with scalar x as
// the first operand.  The transpose loop itself comes from the included
// GB_unop_transpose.c, which expands GB_CAST_OP defined just above.
GrB_Info GB_bind1st_tran__ne_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the NE operator with scalar y as
// the second operand, via the included GB_unop_transpose.c template.
GrB_Info GB_bind2nd_tran__ne_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
vq_train.c | /*Daala video codec
Copyright (c) 2012-2014 Daala project contributors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "od_defs.h"
#include "../src/dct.h"
#define MAX(a,b) ((a)>(b)?(a):(b))
#define MAX_ENTRIES (4096)
#define MAX_DIMS (128)
#if 0
# undef NUM_PROCS
# define NUM_PROCS (1)
#endif
/*Computes the dot product of two length-n vectors x and y.*/
static double inner_prod(const double *x, const double *y, int n) {
  double acc;
  int k;
  acc = 0;
  for (k = 0; k < n; k++) {
    acc += x[k]*y[k];
  }
  return acc;
}
static void normalize(double *x, int n) {
int i;
double sum;
sum = 1e-30;
for (i = 0; i < n; i++) sum += x[i]*x[i];
sum = 1./sqrt(sum);
for (i = 0; i < n; i++) x[i] *= sum;
}
/* Returns the distance to the closest K=2 codeword. We can take a shortcut
because there are only two possibilities: both pulses at the position with
largest magnitude, or one pulse at each of the two largest magnitudes. */
/*Distance from the (unit-norm) vector data to the nearest K=2 PVQ codeword.
   Only two candidates exist: both pulses on the largest-magnitude position,
   or one pulse on each of the two largest magnitudes, so it suffices to
   track the top two magnitudes.*/
static double pvq_dist_k2(const double *data, int n) {
  double big1;  /* largest magnitude seen so far */
  double big2;  /* second-largest magnitude seen so far */
  int k;
  big1 = big2 = -1;
  for (k = 0; k < n; k++) {
    double mag;
    mag = fabs(data[k]);
    if (mag > big2) {
      if (mag > big1) {
        big2 = big1;
        big1 = mag;
      }
      else big2 = mag;
    }
  }
  /*Best correlation is either big1 (single position, both pulses) or the
     two-position codeword at 1/sqrt(2) per axis.*/
  return 2 - 2*fmax(big1, M_SQRT1_2*(big1 + big2));
}
static int find_nearest(const double *data, const double *codebook, int nb_entries,
int n, double *sign, double *err) {
double best_dist;
double best_sign;
int best_id;
int i;
best_dist = -1;
best_id = 0;
best_sign = 1;
for (i = 0; i < nb_entries; i++) {
double dist;
dist = inner_prod(data, &codebook[i*n], n);
if (fabs(dist) > best_dist) {
best_dist = fabs(dist);
best_sign = dist > 0 ? 1 : -1;
best_id = i;
}
}
if (sign) *sign = best_sign;
if (err) *err = 2 - 2*best_dist;
return best_id;
}
/*Seeds the codebook with nb_entries vectors drawn at random from the
   training data, each perturbed slightly and renormalized.  The noise
   keeps duplicate picks from producing identical entries.*/
void vq_rand_init(const double *data, int nb_vectors, double *codebook,
 int nb_entries, int n) {
  int k;
  for (k = 0; k < nb_entries; k++) {
    int pick;
    int j;
    pick = rand()%nb_vectors;
    for (j = 0; j < n; j++) {
      /*Small random offset in {-.01, 0, .01} per component.*/
      codebook[k*n + j] = data[pick*n + j] + .01*(rand()%3 - 1);
    }
    normalize(&codebook[k*n], n);
  }
}
/*Runs nb_iter rounds of (sign-aware) Lloyd iteration on the codebook.
   data: nb_vectors unit-norm vectors of dimension n (row-major).
   codebook: nb_entries x n, updated in place.
   exclude_pvq: when nonzero, vectors better served by the K=2 PVQ codeword
   are excluded from the centroid update (their error still counts).
   Returns the RMS error of the final iteration, or 0 if nb_iter <= 0,
   or -1 on allocation failure.*/
double vq_train(const double *data, int nb_vectors, double *codebook,
 int nb_entries, int n, int nb_iter, int exclude_pvq) {
  int i;
  int iter;
  double rms[NUM_PROCS];
  double *accum;
  /*Bug fix: allocate exactly the space used (one codebook-sized slab per
     thread) instead of a fixed MAX_ENTRIES*MAX_DIMS*NUM_PROCS block, which
     silently overflowed whenever nb_entries*n exceeded that fixed bound.*/
  accum = malloc((size_t)nb_entries*n*NUM_PROCS*sizeof(*accum));
  if (accum == NULL) {
    fprintf(stderr, "vq_train: malloc() failed, giving up.\n");
    return -1;
  }
  /*Bug fix: make the return value well-defined even when nb_iter <= 0
     (rms was previously read uninitialized in that case).*/
  rms[0] = 0;
  for (iter = 0; iter < nb_iter; iter++) {
    for (i = 0; i < NUM_PROCS; i++) rms[i] = 0;
    memset(accum, 0, (size_t)nb_entries*n*NUM_PROCS*sizeof(*accum));
#pragma omp parallel for schedule(dynamic)
    for (i = 0; i < nb_vectors; i++) {
      int tid;
      int id;
      double sign;
      double pvq_err;
      double err;
      tid = OD_OMP_GET_THREAD;
      id = find_nearest(&data[i*n], codebook, nb_entries, n, &sign, &err);
      pvq_err = pvq_dist_k2(&data[i*n], n);
      if (!exclude_pvq || err < pvq_err) {
        int j;
        int offset;
        rms[tid] += err;
        /*Accumulate into this thread's private slab to avoid data races.*/
        offset = nb_entries*n*tid + id*n;
        for (j = 0; j < n; j++) accum[offset + j] += sign*data[i*n + j];
      }
      else rms[tid] += pvq_err;
    }
    /*Reduce the per-thread accumulators and error sums into slot 0.*/
    for (i = 1; i < NUM_PROCS; i++) {
      int j;
      int offset;
      offset = nb_entries*n*i;
      for (j = 0; j < nb_entries*n; j++) accum[j] += accum[offset + j];
    }
    for (i = 1; i < NUM_PROCS; i++) rms[0] += rms[i];
    /*New codebook entries are the normalized accumulated vectors.*/
    for (i = 0; i < nb_entries; i++) normalize(&accum[i*n], n);
    for (i = 0; i < nb_entries*n; i++) codebook[i] = accum[i];
    rms[0] = sqrt(rms[0]/nb_vectors);
    fprintf(stderr, "RMS: %f\n", rms[0]);
  }
  free(accum);
  return rms[0];
}
/*Reads <max vectors> whitespace-separated vectors of <dimensions> doubles
   from stdin, trains a 2^<bits>-entry codebook, and prints it as a C array
   on stdout.  Diagnostics go to stderr.*/
int main(int argc, char **argv)
{
  int i;
  int j;
  int nb_vectors;
  int nb_entries;
  int ndim;
  double *data;
  double *codebook;
  double rms;
  unsigned seed;
  seed = time(NULL);
  srand(seed);
  if (argc != 4) {
    /*Bug fix: the old code passed the char constant '\0' (an int) to a %s
       conversion, which is undefined behavior; pass a string instead.*/
    fprintf(stderr, "usage: %s <dimensions> <max vectors> <bits>\n",
     argc > 0 ? argv[0] : "vq_train");
    return 1;
  }
  ndim = atoi(argv[1]);
  nb_vectors = atoi(argv[2]);
  nb_entries = 1 << atoi(argv[3]);
  /*Reject nonsensical sizes early: they would otherwise produce zero-sized
     or overflowing allocations below.*/
  if (ndim <= 0 || nb_vectors <= 0 || nb_entries <= 0) {
    fprintf(stderr, "invalid <dimensions>, <max vectors>, or <bits>\n");
    return 1;
  }
  OD_OMP_SET_THREADS(NUM_PROCS);
  /*Cast to size_t so the byte counts cannot overflow int arithmetic.*/
  data = malloc((size_t)nb_vectors*ndim*sizeof(*data));
  codebook = malloc((size_t)nb_entries*ndim*sizeof(*codebook));
  if (data == NULL || codebook == NULL) {
    fprintf(stderr, "malloc() failed, giving up.\n");
    return 1;
  }
  for (i = 0; i < nb_vectors; i++) {
    if (feof(stdin))
      break;
    for (j = 0; j < ndim; j++) {
      if (scanf("%lf ", &data[i*ndim + j]) != 1) exit(EXIT_FAILURE);
    }
    normalize(&data[i*ndim], ndim);
  }
  /*Only i vectors were actually read; train on those.*/
  nb_vectors = i;
  fprintf(stderr, "read %d vectors\n", nb_vectors);
  vq_rand_init(data, nb_vectors, codebook, nb_entries, ndim);
  rms = vq_train(data, nb_vectors, codebook, nb_entries, ndim, 100, 1);
#if 0
  for (i = 0; i < nb_vectors; i++)
  {
    double sign;
    int nearest;
    nearest = find_nearest(&data[i*ndim], codebook, nb_entries, ndim, &sign,
     NULL);
    printf("%d %f\n", nearest, sign);
  }
#endif
  printf("/* Automatically generated by vq_train. */\n");
  printf("/* Seed was %u. */\n", seed);
  printf("/* RMS training error is %f. */\n", rms);
  printf("const double codebook[%d*%d] = {\n", nb_entries, ndim);
  for (i = 0; i < nb_entries; i++) {
    for (j = 0; j < ndim; j++) printf("%f, ", codebook[i*ndim + j]);
    printf("\n");
  }
  printf("};\n");
  free(data);
  free(codebook);
  return 0;
}
|
GB_binop__le_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint8)
// A*D function (colscale): GB (_AxD__le_uint8)
// D*A function (rowscale): GB (_DxB__le_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__le_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__le_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint8)
// C=scalar+B GB (_bind1st__le_uint8)
// C=scalar+B' GB (_bind1st_tran__le_uint8)
// C=A+scalar GB (_bind2nd__le_uint8)
// C=A'+scalar GB (_bind2nd_tran__le_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_UINT8 || GxB_NO_LE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// This variant is compiled out for LE: the comment below lists the only
// operators that support the dense C += A+B kernel, and LE is not one of
// them, so the generator emitted the name "(none)" and disabled the body.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with cij = (aij <= bij);
// the loop is generated by the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, sparse-into-dense accumulation.  LE is not a valid accumulator,
// so the template is compiled out ("#if 0") and this is a successful no-op.
GrB_Info GB (_Cdense_accumB__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, scalar-into-dense accumulation.  Disabled ("#if 0") for the same
// reason as _Cdense_accumB: LE cannot serve as an accumulator.
GrB_Info GB (_Cdense_accumb__le_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by a diagonal D, with cij = (aij <= dij).
// Cx is bool because LE yields a boolean result.
GrB_Info GB (_AxD__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by a diagonal D, with cij = (dij <= bij).
// Cx is bool because LE yields a boolean result.
GrB_Info GB (_DxB__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B with the LE_UINT8 operator.
// Slicing workspaces are declared here, used by the included add template,
// and released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__le_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse or
// hypersparse, using cij = (aij <= bij) via the included meta template.
GrB_Info GB (_AemultB_08__le_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for this operator (see its definition
// above), so only the non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; the loop comes from the included template.
GrB_Info GB (_AemultB_04__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is held in bitmap form;
// the loop comes from the included bitmap template.
GrB_Info GB (_AemultB_bitmap__le_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply LE_UINT8 with the scalar x bound as the first
// operand, so Cx [p] = (x <= Bx [p]) for every entry present in B.
// Bb is B's bitmap (GBB skips absent entries); Cx and Bx may be aliased.
GrB_Info GB (_bind1st__le_uint8)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in B's bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bval = GBX (Bx, p, false) ;
        Cx [p] = (x <= bval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply LE_UINT8 with the scalar y bound as the second
// operand, so Cx [p] = (Ax [p] <= y) for every entry present in A.
// Ab is A's bitmap (GBB skips absent entries); Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__le_uint8)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((uint8_t *) y_input)) ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    bool *Cx = (bool *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in A's bitmap
        if (!GBB (Ab, p)) continue ;
        uint8_t aval = GBX (Ax, p, false) ;
        Cx [p] = (aval <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply LE with scalar x as the first
// operand; the transpose loop in GB_unop_transpose.c expands GB_CAST_OP.
GrB_Info GB (_bind1st_tran__le_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply LE with scalar y as the second
// operand, via the included GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__minus_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_01__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_03__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fc32)
// A*D function (colscale): GB (_AxD__minus_fc32)
// D*A function (rowscale): GB (_DxB__minus_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fc32)
// C=scalar+B GB (_bind1st__minus_fc32)
// C=scalar+B' GB (_bind1st_tran__minus_fc32)
// C=A+scalar GB (_bind2nd__minus_fc32)
// C=A'+scalar GB (_bind2nd_tran__minus_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_minus (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC32_minus (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_FC32 || GxB_NO_MINUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; MINUS qualifies, so this
// kernel is enabled (unlike comparison operators, which emit "(none)").
void GB (_Cdense_ewise3_accum__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with
// cij = GB_FC32_minus (aij, bij); the loop comes from the template.
GrB_Info GB (_Cdense_ewise3_noaccum__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix with the MINUS
// accumulator; enabled for this operator (no "#if 0" guard here).
GrB_Info GB (_Cdense_accumB__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix with the MINUS
// accumulator.  The inner block returns directly; the trailing return is
// unreachable but kept by the generator for the disabled-operator layout.
GrB_Info GB (_Cdense_accumb__minus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by a diagonal D, with
// cij = GB_FC32_minus (aij, dij); Cx has the FC32 result type.
GrB_Info GB (_AxD__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by a diagonal D, with
// cij = GB_FC32_minus (dij, bij); Cx has the FC32 result type.
GrB_Info GB (_DxB__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the MINUS_FC32 operator.  Slicing
// workspaces are declared here, used by the add template, and released by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__minus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B with
// cij = GB_FC32_minus (aij, bij), via the included meta template.
GrB_Info GB (_AemultB_01__minus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB (_AemultB_02__minus_fc32): eWiseMult kernel (method 02) with
// z = x-y on FC32 values.  MINUS is not commutative, so when
// GB_BINOP_FLIP is set at compile time the flipxy flag selects between
// the f(x,y) and f(y,x) instantiations of the same template.
GrB_Info GB (_AemultB_02__minus_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// GB (_AemultB_03__minus_fc32): eWiseMult kernel (method 03) with
// z = x-y on FC32 values; the loop lives in the included template.
GrB_Info GB (_AemultB_03__minus_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// GB (_AemultB_bitmap__minus_fc32): eWiseMult kernel producing a bitmap
// result C, with z = x-y on FC32 values; driven by the included template.
GrB_Info GB (_AemultB_bitmap__minus_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// GB (_bind1st__minus_fc32): Cx [p] = minus (x, Bx [p]) for FC32 values,
// for every position p that passes the GBB (Bb, p) bitmap test.
GrB_Info GB (_bind1st__minus_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
const GxB_FC32_t x = *((GxB_FC32_t *) x_input) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present according to the bitmap test
if (GBB (Bb, p))
{
Cx [p] = GB_FC32_minus (x, Bx [p]) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// GB (_bind2nd__minus_fc32): Cx [p] = minus (Ax [p], y) for FC32 values,
// for every position p that passes the GBB (Ab, p) bitmap test.
GrB_Info GB (_bind2nd__minus_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
const GxB_FC32_t y = *((GxB_FC32_t *) y_input) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present according to the bitmap test
if (GBB (Ab, p))
{
Cx [p] = GB_FC32_minus (Ax [p], y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_minus (x, aij) ; \
}
// GB (_bind1st_tran__minus_fc32): C = minus (x, A'), with the scalar x
// bound as the first operand; the transpose machinery is in the
// included template, which expands GB_CAST_OP above per entry.
GrB_Info GB (_bind1st_tran__minus_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows in this generated file
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_minus (aij, y) ; \
}
// GB (_bind2nd_tran__minus_fc32): C = minus (A', y), with the scalar y
// bound as the second operand; the transpose machinery is in the
// included template, which expands GB_CAST_OP above per entry.
GrB_Info GB (_bind2nd_tran__minus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
multiway_merge.h | /***************************************************************************
* include/stxxl/bits/parallel/multiway_merge.h
*
* Implementation of sequential and parallel multiway merge.
* Extracted from MCSTL - http://algo2.iti.uni-karlsruhe.de/singler/mcstl/
*
* Part of the STXXL. See http://stxxl.sourceforge.net
*
* Copyright (C) 2007 Johannes Singler <singler@ira.uka.de>
* Copyright (C) 2014 Timo Bingmann <tb@panthema.net>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
**************************************************************************/
#ifndef STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
#define STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
#include <vector>
#include <iterator>
#include <algorithm>
#include <stxxl/bits/verbose.h>
#include <stxxl/bits/common/is_sorted.h>
#include <stxxl/bits/common/utils.h>
#include <stxxl/bits/parallel/merge.h>
#include <stxxl/bits/parallel/losertree.h>
#include <stxxl/bits/parallel/settings.h>
#include <stxxl/bits/parallel/equally_split.h>
#include <stxxl/bits/parallel/multiseq_selection.h>
#include <stxxl/bits/parallel/timing.h>
#include <stxxl/bits/parallel/tags.h>
#if defined(_MSC_VER) && STXXL_DEBUG_ASSERTIONS
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif
STXXL_BEGIN_NAMESPACE
namespace parallel {
//! Number of elements in the half-open range [p.first, p.second)
//! described by an iterator pair.
template <typename RandomAccessIteratorPair>
typename std::iterator_traits<
typename RandomAccessIteratorPair::first_type
>::difference_type
iterpair_size(const RandomAccessIteratorPair& p)
{
return std::distance(p.first, p.second);
}
/*!
 * Iterator wrapper supporting an implicit supremum ("sup") at the end of
 * the sequence, dominating all comparisons: once current == end, the
 * iterator compares as if it referenced an element greater than every
 * real one.  Deriving from RandomAccessIterator is not possible since
 * RandomAccessIterator need not be a class.
 */
template <typename RandomAccessIterator, typename Comparator>
class guarded_iterator
{
public:
//! Our own type
typedef guarded_iterator<RandomAccessIterator, Comparator> self_type;
//! Value type of the iterator
typedef typename std::iterator_traits<RandomAccessIterator>::value_type value_type;
protected:
//! Current iterator position.
RandomAccessIterator current;
//! End iterator of the sequence; current == end means "sup".
RandomAccessIterator end;
//! Comparator (kept by reference; must outlive this wrapper).
Comparator& comp;
public:
/*!
 * Constructor. Sets iterator to beginning of sequence.
 * \param begin Begin iterator of sequence.
 * \param end End iterator of sequence.
 * \param comp Comparator provided for associated overloaded compare
 * operators.
 */
guarded_iterator(RandomAccessIterator begin, RandomAccessIterator end,
Comparator& comp)
: current(begin), end(end), comp(comp)
{ }
/*!
 * Pre-increment operator.
 * \return This.
 */
self_type& operator ++ ()
{
++current;
return *this;
}
/*!
 * Dereference operator.  Must not be called when the iterator is sup.
 * \return Referenced element.
 */
value_type& operator * ()
{
return *current;
}
/*!
 * Convert to wrapped iterator.
 * \return Wrapped iterator.
 */
RandomAccessIterator & iterator()
{
return current;
}
/*!
 * Compare two elements referenced by guarded iterators; sup dominates
 * every real element.
 * \param bi1 First iterator.
 * \param bi2 Second iterator.
 * \return \c True if less.
 */
friend bool operator < (self_type& bi1, self_type& bi2)
{
if (bi1.current == bi1.end) // bi1 is sup
return bi2.current == bi2.end; // true only when bi2 is sup as well
if (bi2.current == bi2.end) // bi2 is sup, any real element is less
return true;
return bi1.comp(*bi1, *bi2); // normal compare
}
/*!
 * Compare two elements referenced by guarded iterators.
 * NOTE(review): sup <= sup yields false here while sup < sup yields
 * true above — asymmetry inherited from the original implementation.
 * \param bi1 First iterator.
 * \param bi2 Second iterator.
 * \return \c True if less equal.
 */
friend bool operator <= (self_type& bi1, self_type& bi2)
{
if (bi2.current == bi2.end) // bi2 is sup: true unless bi1 is sup too
return bi1.current != bi1.end;
if (bi1.current == bi1.end) // bi1 is sup but bi2 is not
return false;
return !bi1.comp(*bi2, *bi1); // normal compare
}
};
//! Iterator wrapper companion to guarded_iterator, but without any end
//! guard: comparisons always dereference.  Safe only when the caller
//! guarantees the sequence cannot run out during the merge (e.g. after
//! prepare_unguarded/prepare_unguarded_sentinel).
template <typename RandomAccessIterator, typename Comparator>
class unguarded_iterator
{
public:
//! Our own type
typedef unguarded_iterator<RandomAccessIterator, Comparator> self_type;
//! Value type of the iterator
typedef typename std::iterator_traits<RandomAccessIterator>::value_type value_type;
protected:
//! Current iterator position.
RandomAccessIterator current;
//! Comparator (kept by reference; used by the compare operators below).
Comparator& comp;
public:
/*!
 * Constructor. Sets iterator to beginning of sequence.
 * \param begin Begin iterator of sequence.
 * \param end Unused, only for signature compatibility with
 * guarded_iterator.
 * \param comp Comparator provided for associated overloaded compare
 * operators.
 */
unguarded_iterator(RandomAccessIterator begin,
RandomAccessIterator /* end */,
Comparator& comp)
: current(begin), comp(comp)
{ }
/*!
 * Pre-increment operator.
 * \return This.
 */
self_type& operator ++ ()
{
++current;
return *this;
}
/*!
 * Dereference operator.
 * \return Referenced element.
 */
value_type& operator * ()
{
return *current;
}
/*!
 * Convert to wrapped iterator.
 * \return Wrapped iterator.
 */
RandomAccessIterator & iterator()
{
return current;
}
/*!
 * Compare two elements referenced by unguarded iterators.
 * \param bi1 First iterator.
 * \param bi2 Second iterator.
 * \return \c True if less.
 */
friend bool operator < (self_type& bi1, self_type& bi2)
{
return bi1.comp(*bi1, *bi2); // normal compare, unguarded
}
/*!
 * Compare two elements referenced by unguarded iterators.
 * \param bi1 First iterator.
 * \param bi2 Second iterator.
 * \return \c True if less equal.
 */
friend bool operator <= (self_type& bi1, self_type& bi2)
{
return !bi1.comp(*bi2, *bi1); // normal compare, unguarded
}
};
/*!
 * Prepare a set of sequences to be merged without a (end) guard.
 *
 * Finds the smallest "last element" over all sequences: merging can run
 * unguarded up to (roughly) that value, because no sequence can be
 * exhausted before it is reached.  Counts how many elements lie beyond
 * that bound across all sequences.
 *
 * \param seqs_begin Begin of the iterator-pair input sequence.
 * \param seqs_end End of the iterator-pair input sequence.
 * \param comp Comparator.
 * \param min_sequence Output: index of the sequence whose last element
 * is smallest, or of the first empty sequence found.
 * \tparam Stable Whether the caller needs a stability-preserving split.
 * \pre (seqs_end - seqs_begin > 0)
 * \return Number of elements that cannot be merged unguarded (the
 * "overhang"), or -1 if some input sequence is empty.
 */
template <bool Stable, typename RandomAccessIteratorIterator, typename Comparator>
typename std::iterator_traits<
typename std::iterator_traits<RandomAccessIteratorIterator>::value_type::first_type
>::difference_type
prepare_unguarded(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
Comparator comp,
int& min_sequence)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
typedef typename std::iterator_traits<RandomAccessIterator>
::difference_type diff_type;
if ((*seqs_begin).first == (*seqs_begin).second)
{
// empty sequence found, it's the first one
min_sequence = 0;
return -1;
}
// last element in sequence
value_type min = *((*seqs_begin).second - 1);
min_sequence = 0;
for (RandomAccessIteratorIterator s = seqs_begin + 1; s != seqs_end; ++s)
{
if ((*s).first == (*s).second)
{
// empty sequence found
min_sequence = static_cast<int>(s - seqs_begin);
return -1;
}
const value_type& v = *((*s).second - 1);
if (comp(v, min))
{
// last element in sequence is strictly smaller
min = v;
min_sequence = static_cast<int>(s - seqs_begin);
}
}
diff_type overhang_size = 0;
int s = 0;
// sequences up to and including min_sequence: for a stable merge the
// split must keep elements equal to min (upper_bound), because they
// would precede min_sequence's copy in the output
for (s = 0; s <= min_sequence; ++s)
{
RandomAccessIterator split;
if (Stable)
split = std::upper_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
else
split = std::lower_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
overhang_size += seqs_begin[s].second - split;
}
// remaining sequences: elements equal to min always belong to the
// overhang, so lower_bound suffices in both cases
for ( ; s < (seqs_end - seqs_begin); ++s)
{
RandomAccessIterator split =
std::lower_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
overhang_size += seqs_begin[s].second - split;
}
return overhang_size; // so many elements will be left over afterwards
}
/*!
 * Prepare a set of sequences to be merged with a (end) guard (sentinel).
 *
 * Finds the largest last element over all non-empty sequences and writes
 * it as a sentinel into the slot one past the end of every sequence, so
 * an unguarded merge can never run past a sequence's real end.
 * NOTE(review): callers must guarantee that *((*s).second) is valid,
 * writable sentinel storage for every sequence — confirm at call sites.
 *
 * \param seqs_begin Begin of the iterator-pair input sequence.
 * \param seqs_end End of the iterator-pair input sequence.
 * \param comp Comparator.
 * \return Number of elements that will be left over after the unguarded
 * phase (elements not strictly smaller than the chosen sentinel).
 */
template <typename RandomAccessIteratorIterator, typename Comparator>
typename std::iterator_traits<
typename std::iterator_traits<RandomAccessIteratorIterator>::value_type::first_type
>::difference_type
prepare_unguarded_sentinel(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
typedef typename std::iterator_traits<RandomAccessIterator>
::difference_type diff_type;
value_type* max_value = NULL; // maximum of the last elements seen so far
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
{
if ((*s).first == (*s).second)
continue; // empty sequence contributes neither maximum nor overhang
value_type& v = *((*s).second - 1); // last element in sequence
if (!max_value || comp(*max_value, v)) // strictly greater
max_value = &v;
}
// BUG FIX: if every sequence is empty (or there are no sequences) there
// is no maximum; the original code dereferenced max_value == NULL in
// the loop below.  Nothing to prepare, no overhang.
if (max_value == NULL)
return 0;
diff_type overhang_size = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
{
RandomAccessIterator split = std::lower_bound((*s).first, (*s).second, *max_value, comp);
overhang_size += (*s).second - split;
*((*s).second) = *max_value; // set sentinel one past the end
}
return overhang_size; // so many elements will be left over afterwards
}
/*!
 * Highly efficient 3-way merging procedure.
 *
 * Merging is done with the algorithm implementation described by Peter
 * Sanders. Basically, the idea is to minimize the number of necessary
 * comparison after merging an element. The implementation trick that makes
 * this fast is that the order of the sequences is stored in the instruction
 * pointer (translated into labels in C++): label sXYZ means "the heads of
 * seqX <= seqY <= seqZ", so after emitting from seqX only the new head of
 * seqX has to be re-inserted by at most two comparisons.
 *
 * This works well for merging up to 4 sequences.
 *
 * Note that making the merging stable does \a not come at a performance hit.
 *
 * Whether the merging is done guarded or unguarded is selected by the used
 * iterator class.
 *
 * \param seqs_begin Begin iterator of iterator pair input sequence.
 * \param seqs_end End iterator of iterator pair input sequence.
 * \param target Begin iterator out output sequence.
 * \param length Maximum length to merge.
 * \param comp Comparator.
 * \return End iterator of output sequence.
 */
template <template <typename RAI, typename C> class Iterator,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_3_variant(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 3);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
if (length == 0)
return target;
#if STXXL_DEBUG_ASSERTIONS
ssize_t orig_length = length;
#endif
// wrap the three sequence heads; Iterator is guarded_iterator or
// unguarded_iterator, selecting guarded vs. unguarded behavior
Iterator<RandomAccessIterator, Comparator>
seq0(seqs_begin[0].first, seqs_begin[0].second, comp),
seq1(seqs_begin[1].first, seqs_begin[1].second, comp),
seq2(seqs_begin[2].first, seqs_begin[2].second, comp);
// initial dispatch: jump to the label matching the sorted order of
// the three current heads (<= keeps lower-indexed sequences first,
// which makes the merge stable)
if (seq0 <= seq1)
{
if (seq1 <= seq2)
goto s012;
else if (seq2 < seq0)
goto s201;
else
goto s021;
}
else
{
if (seq1 <= seq2)
{
if (seq0 <= seq2)
goto s102;
else
goto s120;
}
else
goto s210;
}
// emit from seq'a' (the minimum), advance it, then re-insert its new
// head among b and c using comparators c0/c1 (chosen per state so that
// ties keep the lower-indexed sequence first)
#define STXXL_MERGE3CASE(a, b, c, c0, c1) \
s ## a ## b ## c: \
*target = *seq ## a; \
++target; \
--length; \
++seq ## a; \
if (length == 0) goto finish; \
if (seq ## a c0 seq ## b) goto s ## a ## b ## c; \
if (seq ## a c1 seq ## c) goto s ## b ## a ## c; \
goto s ## b ## c ## a;
STXXL_MERGE3CASE(0, 1, 2, <=, <=);
STXXL_MERGE3CASE(1, 2, 0, <=, <);
STXXL_MERGE3CASE(2, 0, 1, <, <);
STXXL_MERGE3CASE(1, 0, 2, <, <=);
STXXL_MERGE3CASE(0, 2, 1, <=, <=);
STXXL_MERGE3CASE(2, 1, 0, <, <);
#undef STXXL_MERGE3CASE
finish:
;
#if STXXL_DEBUG_ASSERTIONS
STXXL_CHECK_EQUAL((seq0.iterator() - seqs_begin[0].first) +
(seq1.iterator() - seqs_begin[1].first) +
(seq2.iterator() - seqs_begin[2].first),
orig_length);
#endif
// write the advanced positions back so callers see consumed input
seqs_begin[0].first = seq0.iterator();
seqs_begin[1].first = seq1.iterator();
seqs_begin[2].first = seq2.iterator();
return target;
}
//! 3-way merge that first merges as much as possible unguarded (up to
//! the smallest last element, see prepare_unguarded), then finishes the
//! remaining "overhang" with a guarded 2-way merge of the two sequences
//! that cannot be exhausted yet.
template <typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_3_combined(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 3);
int min_seq;
RandomAccessIterator3 target_end;
// overhang == -1 signals that some input sequence is empty
DiffType overhang = prepare_unguarded<true>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
// unguarded phase: merge everything below the overhang bound
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_3_variant<unguarded_iterator>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found: everything must be merged guarded
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
// min_seq is exhausted (or may run out) first; merge the other two
// sequences guarded for the remaining overhang elements
switch (min_seq)
{
case 0:
// iterators will be advanced accordingly
target_end = merge_advance(
seqs_begin[1].first, seqs_begin[1].second,
seqs_begin[2].first, seqs_begin[2].second,
target_end, overhang, comp);
break;
case 1:
target_end = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[2].first, seqs_begin[2].second,
target_end, overhang, comp);
break;
case 2:
target_end = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[1].first, seqs_begin[1].second,
target_end, overhang, comp);
break;
default:
assert(false);
}
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
/*!
 * Highly efficient 4-way merging procedure.
 *
 * Merging is done with the algorithm implementation described by Peter
 * Sanders. Basically, the idea is to minimize the number of necessary
 * comparison after merging an element. The implementation trick that makes
 * this fast is that the order of the sequences is stored in the instruction
 * pointer (translated into goto labels in C++): label sWXYZ means "the
 * heads of seqW <= seqX <= seqY <= seqZ", so after emitting from seqW
 * only its new head has to be re-inserted, with at most three compares.
 *
 * This works well for merging up to 4 sequences.
 *
 * Note that making the merging stable does \a not come at a performance hit.
 *
 * Whether the merging is done guarded or unguarded is selected by the used
 * iterator class.
 *
 * \param seqs_begin Begin iterator of iterator pair input sequence.
 * \param seqs_end End iterator of iterator pair input sequence.
 * \param target Begin iterator out output sequence.
 * \param length Maximum length to merge.
 * \param comp Comparator.
 * \return End iterator of output sequence.
 */
template <template <typename RAI, typename C> class iterator,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_4_variant(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 4);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
if (length == 0)
return target;
#if STXXL_DEBUG_ASSERTIONS
ssize_t orig_length = length;
#endif
// wrap the four sequence heads; `iterator` is guarded_iterator or
// unguarded_iterator, selecting guarded vs. unguarded behavior
iterator<RandomAccessIterator, Comparator>
seq0(seqs_begin[0].first, seqs_begin[0].second, comp),
seq1(seqs_begin[1].first, seqs_begin[1].second, comp),
seq2(seqs_begin[2].first, seqs_begin[2].second, comp),
seq3(seqs_begin[3].first, seqs_begin[3].second, comp);
// given a <= b <= c already established, insert d into its rank and
// jump to the matching 4-sequence state label
#define STXXL_DECISION(a, b, c, d) do { \
if (seq ## d < seq ## a) goto s ## d ## a ## b ## c; \
if (seq ## d < seq ## b) goto s ## a ## d ## b ## c; \
if (seq ## d < seq ## c) goto s ## a ## b ## d ## c; \
goto s ## a ## b ## c ## d; \
} \
while (0)
// initial dispatch: sort seq0..seq2 by comparisons, then place seq3
if (seq0 <= seq1)
{
if (seq1 <= seq2)
STXXL_DECISION(0, 1, 2, 3);
else if (seq2 < seq0)
STXXL_DECISION(2, 0, 1, 3);
else
STXXL_DECISION(0, 2, 1, 3);
}
else
{
if (seq1 <= seq2)
{
if (seq0 <= seq2)
STXXL_DECISION(1, 0, 2, 3);
else
STXXL_DECISION(1, 2, 0, 3);
}
else
STXXL_DECISION(2, 1, 0, 3);
}
// emit from seq'a' (the minimum), advance it, then re-insert its new
// head among b, c, d using comparators c0/c1/c2 (chosen per state so
// that ties keep the lower-indexed sequence first => stability)
#define STXXL_MERGE4CASE(a, b, c, d, c0, c1, c2) \
s ## a ## b ## c ## d: \
if (length == 0) goto finish; \
*target = *seq ## a; \
++target; \
--length; \
++seq ## a; \
if (seq ## a c0 seq ## b) goto s ## a ## b ## c ## d; \
if (seq ## a c1 seq ## c) goto s ## b ## a ## c ## d; \
if (seq ## a c2 seq ## d) goto s ## b ## c ## a ## d; \
goto s ## b ## c ## d ## a;
STXXL_MERGE4CASE(0, 1, 2, 3, <=, <=, <=);
STXXL_MERGE4CASE(0, 1, 3, 2, <=, <=, <=);
STXXL_MERGE4CASE(0, 2, 1, 3, <=, <=, <=);
STXXL_MERGE4CASE(0, 2, 3, 1, <=, <=, <=);
STXXL_MERGE4CASE(0, 3, 1, 2, <=, <=, <=);
STXXL_MERGE4CASE(0, 3, 2, 1, <=, <=, <=);
STXXL_MERGE4CASE(1, 0, 2, 3, <, <=, <=);
STXXL_MERGE4CASE(1, 0, 3, 2, <, <=, <=);
STXXL_MERGE4CASE(1, 2, 0, 3, <=, <, <=);
STXXL_MERGE4CASE(1, 2, 3, 0, <=, <=, <);
STXXL_MERGE4CASE(1, 3, 0, 2, <=, <, <=);
STXXL_MERGE4CASE(1, 3, 2, 0, <=, <=, <);
STXXL_MERGE4CASE(2, 0, 1, 3, <, <, <=);
STXXL_MERGE4CASE(2, 0, 3, 1, <, <=, <);
STXXL_MERGE4CASE(2, 1, 0, 3, <, <, <=);
STXXL_MERGE4CASE(2, 1, 3, 0, <, <=, <);
STXXL_MERGE4CASE(2, 3, 0, 1, <=, <, <);
STXXL_MERGE4CASE(2, 3, 1, 0, <=, <, <);
STXXL_MERGE4CASE(3, 0, 1, 2, <, <, <);
STXXL_MERGE4CASE(3, 0, 2, 1, <, <, <);
STXXL_MERGE4CASE(3, 1, 0, 2, <, <, <);
STXXL_MERGE4CASE(3, 1, 2, 0, <, <, <);
STXXL_MERGE4CASE(3, 2, 0, 1, <, <, <);
STXXL_MERGE4CASE(3, 2, 1, 0, <, <, <);
#undef STXXL_MERGE4CASE
#undef STXXL_DECISION
finish:
;
#if STXXL_DEBUG_ASSERTIONS
STXXL_CHECK_EQUAL((seq0.iterator() - seqs_begin[0].first) +
(seq1.iterator() - seqs_begin[1].first) +
(seq2.iterator() - seqs_begin[2].first) +
(seq3.iterator() - seqs_begin[3].first),
orig_length);
#endif
// write the advanced positions back so callers see consumed input
seqs_begin[0].first = seq0.iterator();
seqs_begin[1].first = seq1.iterator();
seqs_begin[2].first = seq2.iterator();
seqs_begin[3].first = seq3.iterator();
return target;
}
//! 4-way merge that first merges as much as possible unguarded (up to
//! the smallest last element, see prepare_unguarded), then finishes the
//! remaining "overhang" with a guarded 3-way merge of the sequences
//! other than min_seq (the one that may run out first).
template <typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_4_combined(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 4);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
int min_seq;
RandomAccessIterator3 target_end;
// overhang == -1 signals that some input sequence is empty
DiffType overhang = prepare_unguarded<true>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType) - 1)
{
// unguarded phase: merge everything below the overhang bound
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_4_variant<unguarded_iterator>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found: everything must be merged guarded
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
std::vector<RandomAccessIteratorPair> one_missing(seqs_begin, seqs_end);
one_missing.erase(one_missing.begin() + min_seq); //remove
target_end = multiway_merge_3_variant<guarded_iterator>(one_missing.begin(), one_missing.end(), target_end, overhang, comp);
one_missing.insert(one_missing.begin() + min_seq, seqs_begin[min_seq]); //insert back again
std::copy(one_missing.begin(), one_missing.end(), seqs_begin); //write back modified iterators
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
/*!
 * Basic multi-way merging procedure.
 *
 * The head elements of the remaining sequences are kept in a small array
 * (pl/source) maintained in sorted order; a newly fetched head is sunk to
 * its position linearly.  Suitable only for a small number of sequences.
 *
 * \param seqs_begin Begin iterator of iterator pair input sequence.
 * \param seqs_end End iterator of iterator pair input sequence.
 * \param target Begin iterator out output sequence.
 * \param length Maximum length to merge.
 * \param comp Comparator.
 * \tparam Stable Stable merging incurs a performance penalty.
 * \return End iterator of output sequence.
 */
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_bubble(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
// k: number of input sequences; nrp: number of remaining (non-empty) ones
int k = static_cast<int>(seqs_end - seqs_begin), nrp;
// pl[i]: cached head element of the i-th remaining sequence
// source[i]: index of the input sequence pl[i] was taken from
value_type* pl = new value_type[k];
int* source = new int[k];
#define POS(i) seqs_begin[(i)].first
#define STOPS(i) seqs_begin[(i)].second
// load the head of every non-empty sequence into the work arrays
nrp = 0;
for (int pi = 0; pi < k; ++pi)
{
if (STOPS(pi) != POS(pi))
{
pl[nrp] = *(POS(pi));
source[nrp] = pi;
++nrp;
}
}
// sort the heads; for the stable variant, ties are broken by source
// index so equal elements come out in input-sequence order
if (Stable)
{
for (int i = 0; i < nrp - 1; ++i)
for (int pi = nrp - 1; pi > i; --pi)
if (comp(pl[pi], pl[pi - 1]) ||
(!comp(pl[pi - 1], pl[pi]) && source[pi] < source[pi - 1]))
{
std::swap(pl[pi - 1], pl[pi]);
std::swap(source[pi - 1], source[pi]);
}
}
else
{
for (int i = 0; i < nrp - 1; ++i)
for (int pi = nrp - 1; pi > i; --pi)
if (comp(pl[pi], pl[pi - 1]))
{
std::swap(pl[pi - 1], pl[pi]);
std::swap(source[pi - 1], source[pi]);
}
}
// iterate
if (Stable)
{
int j;
while (nrp > 0 && length > 0)
{
// BUG FIX: when only one sequence remains, source[1] is out of
// bounds (UB for k == 1) or stale.  Both branches behave
// identically for nrp == 1 (the inner while conditions short-
// circuit), so take the first branch without reading source[1].
if (nrp == 1 || source[0] < source[1])
{
// emit from source[0] while pl[0] <= pl[1]
while ((nrp == 1 || !(comp(pl[1], pl[0]))) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
// sequence exhausted: move everything to the left
for (int s = 0; s < nrp - 1; ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
}
else
{
// higher source index: emit only while strictly pl[0] < pl[1]
while ((nrp == 1 || comp(pl[0], pl[1])) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
for (int s = 0; s < nrp - 1; ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
}
// sink the new head down to its sorted position (ties by source)
j = 1;
while ((j < nrp) && (comp(pl[j], pl[j - 1]) ||
(!comp(pl[j - 1], pl[j]) && (source[j] < source[j - 1]))))
{
std::swap(pl[j - 1], pl[j]);
std::swap(source[j - 1], source[j]);
++j;
}
}
}
else
{
int j;
while (nrp > 0 && length > 0)
{
// emit from source[0] while pl[0] <= pl[1]
while ((nrp == 1 || !comp(pl[1], pl[0])) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
for (int s = 0; s < (nrp - 1); ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
// sink the new head down to its sorted position
j = 1;
while ((j < nrp) && comp(pl[j], pl[j - 1]))
{
std::swap(pl[j - 1], pl[j]);
std::swap(source[j - 1], source[j]);
++j;
}
}
}
delete[] pl;
delete[] source;
return target;
}
/*!
 * Multi-way merging procedure for a high branching factor, guarded case.
 *
 * The head elements are kept in a loser tree.  An exhausted sequence is
 * represented in the tree by a copy of an arbitrary real element plus a
 * "sup" flag, so the tree never dereferences past a sequence's end.
 *
 * \param seqs_begin Begin iterator of iterator pair input sequence.
 * \param seqs_end End iterator of iterator pair input sequence.
 * \param target Begin iterator out output sequence.
 * \param length Maximum length to merge.
 * \param comp Comparator.
 * \return End iterator of output sequence.
 */
template <typename LoserTreeType,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename LoserTreeType::source_type source_type;
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
source_type k = static_cast<source_type>(seqs_end - seqs_begin);
LoserTreeType lt(k, comp);
DiffType total_length = 0;
const value_type* arbitrary_element = NULL;
// find an arbitrary element to avoid default construction
for (source_type t = 0; t < k; ++t)
{
if (!arbitrary_element && iterpair_size(seqs_begin[t]) > 0)
arbitrary_element = &(*seqs_begin[t].first);
total_length += iterpair_size(seqs_begin[t]);
}
// BUG FIX: if every sequence is empty (or k == 0), there is no element
// to prime the tree with; the original code dereferenced the NULL
// arbitrary_element below.  Nothing to merge, so return immediately.
if (arbitrary_element == NULL)
return target;
for (source_type t = 0; t < k; ++t)
{
if (UNLIKELY(seqs_begin[t].first == seqs_begin[t].second))
lt.insert_start(*arbitrary_element, t, true); // already empty: sup
else
lt.insert_start(*seqs_begin[t].first, t, false);
}
lt.init();
// do not merge more elements than are actually available
total_length = std::min(total_length, length);
for (DiffType i = 0; i < total_length; ++i)
{
// take out the overall minimum
source_type source = lt.get_min_source();
*target = *seqs_begin[source].first;
++target;
++seqs_begin[source].first;
// feed the tree from the same source, or mark it exhausted (sup)
if (seqs_begin[source].first == seqs_begin[source].second)
lt.delete_min_insert(*arbitrary_element, true);
else
// replace from same source
lt.delete_min_insert(*seqs_begin[source].first, false);
}
return target;
}
/*!
 * Multi-way merging procedure for a high branching factor, unguarded case.
 * The head elements are kept in a loser tree.
 *
 * \param seqs_begin Begin iterator of iterator pair input sequence.
 * \param seqs_end End iterator of iterator pair input sequence.
 * \param target Begin iterator out output sequence.
 * \param length Maximum length to merge.
 * \param comp Comparator.
 * \return End iterator of output sequence.
 * \pre No input will run out of elements during the merge, and every
 * input sequence is non-empty (each is read for its start element and,
 * after its last real element is consumed, one slot beyond — typically
 * the sentinel written by prepare_unguarded_sentinel).
 */
template <typename LoserTreeType,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType,
typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_unguarded(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
int k = (int)(seqs_end - seqs_begin);
// sentinel is the last item of the first sequence; NOTE(review): this
// presumes the caller arranged for it to dominate the merged range —
// confirm against prepare_unguarded_sentinel usage
LoserTreeType lt(k, *(seqs_begin->second - 1), comp);
DiffType total_length = 0;
for (int t = 0; t < k; ++t)
{
assert(seqs_begin[t].first != seqs_begin[t].second);
lt.insert_start(*seqs_begin[t].first, t);
total_length += iterpair_size(seqs_begin[t]);
}
lt.init();
// do not go past end
length = std::min(total_length, length);
int source;
#if STXXL_DEBUG_ASSERTIONS
DiffType i = 0;
#endif
RandomAccessIterator3 target_end = target + length;
while (target < target_end)
{
// take out the overall minimum
source = lt.get_min_source();
#if STXXL_DEBUG_ASSERTIONS
// output must stay sorted
assert(i == 0 || !comp(*(seqs_begin[source].first), *(target - 1)));
#endif
*target = *seqs_begin[source].first;
++seqs_begin[source].first;
++target;
#if STXXL_DEBUG_ASSERTIONS
assert((seqs_begin[source].first != seqs_begin[source].second) || (i == length - 1));
++i;
#endif
// feed: replace from same source; may read the one-past-the-end
// sentinel slot when the sequence just ran out (see precondition)
lt.delete_min_insert(*seqs_begin[source].first);
}
return target;
}
// Traits selecting the loser tree implementation for the guarded merge:
// by default the tree stores pointers to the items (avoids copying
// potentially large value types). Specialized below for small built-in
// types where copying is cheaper.
template <bool Stable, class ValueType, class Comparator>
struct loser_tree_traits
{
public:
typedef LoserTreePointer<Stable, ValueType, Comparator> LT;
};
// For small built-in integral types, copying the value into the loser tree
// is cheaper than indirecting through a pointer, so specialize the traits
// to use the copying tree implementation for each such type.
#define STXXL_NO_POINTER(T) \
template <bool Stable, class Comparator> \
struct loser_tree_traits<Stable, T, Comparator> \
{ \
typedef LoserTreeCopy<Stable, T, Comparator> LT; \
};
STXXL_NO_POINTER(unsigned char)
STXXL_NO_POINTER(char)
STXXL_NO_POINTER(unsigned short)
STXXL_NO_POINTER(short)
STXXL_NO_POINTER(unsigned int)
STXXL_NO_POINTER(int)
STXXL_NO_POINTER(unsigned long)
STXXL_NO_POINTER(long)
STXXL_NO_POINTER(unsigned long long)
STXXL_NO_POINTER(long long)
#undef STXXL_NO_POINTER
// Traits selecting the loser tree implementation for the unguarded merge:
// by default the tree stores pointers to the items.
// Declared as 'struct' (instead of the previous 'class') to match both
// loser_tree_traits above and the 'struct' specializations generated by
// STXXL_NO_POINTER_UNGUARDED below; a class-key mismatch between primary
// template and specializations triggers warnings (e.g. MSVC C4099).
template <bool Stable, class ValueType, class Comparator>
struct loser_tree_traits_unguarded
{
public:
typedef LoserTreePointerUnguarded<Stable, ValueType, Comparator> LT;
};
// Mirror of STXXL_NO_POINTER for the unguarded tree: small built-in types
// use the copying (value-holding) unguarded loser tree implementation.
#define STXXL_NO_POINTER_UNGUARDED(T) \
template <bool Stable, class Comparator> \
struct loser_tree_traits_unguarded<Stable, T, Comparator> \
{ \
typedef LoserTreeCopyUnguarded<Stable, T, Comparator> LT; \
};
STXXL_NO_POINTER_UNGUARDED(unsigned char)
STXXL_NO_POINTER_UNGUARDED(char)
STXXL_NO_POINTER_UNGUARDED(unsigned short)
STXXL_NO_POINTER_UNGUARDED(short)
STXXL_NO_POINTER_UNGUARDED(unsigned int)
STXXL_NO_POINTER_UNGUARDED(int)
STXXL_NO_POINTER_UNGUARDED(unsigned long)
STXXL_NO_POINTER_UNGUARDED(long)
STXXL_NO_POINTER_UNGUARDED(unsigned long long)
STXXL_NO_POINTER_UNGUARDED(long long)
#undef STXXL_NO_POINTER_UNGUARDED
/*!
* Multi-way merging procedure combining the unguarded and guarded loser
* tree variants: merge without emptiness checks as long as this is provably
* safe, then finish the remaining "overhang" with the guarded merge.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \return End iterator of output sequence.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_combined(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
int min_seq;
RandomAccessIterator3 target_end;
// overhang: number of trailing elements that must be merged with emptiness
// checks; (DiffType)(-1) signals that an empty input sequence was found
DiffType overhang = prepare_unguarded<Stable>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
// merge the prefix that is safe to process without checks
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_loser_tree_unguarded
<typename loser_tree_traits_unguarded<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target, unguarded_length, comp);
// remaining number of elements still to produce
overhang = length - unguarded_length;
}
else
{
// empty sequence found: everything goes through the guarded merge
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
// finish the remaining elements with the guarded (checking) variant
target_end = multiway_merge_loser_tree
<typename loser_tree_traits<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target_end, overhang, comp);
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
/*!
* Multi-way merging procedure using the unguarded loser tree, for inputs
* where every sequence carries one sentinel element directly behind its end
* iterator. The sequence ends are temporarily advanced over the sentinels
* for the duration of the merge and restored afterwards.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \return End iterator of output sequence.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_sentinel(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
// grow every sequence by one element so the sentinel is inside the range
for (RandomAccessIteratorIterator seq = seqs_begin; seq != seqs_end; ++seq)
++(*seq).second;
const RandomAccessIterator3 target_end
= multiway_merge_loser_tree_unguarded
<typename loser_tree_traits_unguarded<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target, length, comp);
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
// shrink the sequences back to their real ends
for (RandomAccessIteratorIterator seq = seqs_begin; seq != seqs_end; ++seq)
--(*seq).second;
return target_end;
}
/*!
* Sequential multi-way merging switch.
*
* The decision is based on the branching factor and runtime settings.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \tparam Sentinels The sequences have a sentinel element.
* \return End iterator of output sequence.
*/
template <bool Stable, bool Sentinels,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
sequential_multiway_merge(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
// every input sequence must already be sorted
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
STXXL_DEBUG_ASSERT(stxxl::is_sorted((*s).first, (*s).second, comp));
RandomAccessIterator3 return_target = target;
// branching factor (number of input sequences)
int k = static_cast<int>(seqs_end - seqs_begin);
SETTINGS::MultiwayMergeAlgorithm mwma = SETTINGS::multiway_merge_algorithm;
// the sentinel algorithm requires sentinel elements; fall back otherwise
if (!Sentinels && mwma == SETTINGS::LOSER_TREE_SENTINEL)
mwma = SETTINGS::LOSER_TREE_COMBINED;
switch (k)
{
case 0:
// no input sequences: nothing to do
break;
case 1:
// a single sequence: plain copy.
// NOTE(review): assumes the sequence holds at least 'length' elements
return_target = std::copy(seqs_begin[0].first,
seqs_begin[0].first + length,
target);
seqs_begin[0].first += length;
break;
case 2:
// two sequences: ordinary binary merge
return_target = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[1].first, seqs_begin[1].second,
target, length, comp);
break;
case 3:
// dedicated 3-way merge variants
switch (mwma)
{
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_3_combined(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_3_variant<unguarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
return_target = multiway_merge_3_variant<guarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
}
break;
case 4:
// dedicated 4-way merge variants
switch (mwma)
{
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_4_combined(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_4_variant<unguarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
return_target = multiway_merge_4_variant<guarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
}
break;
default:
{
// general case: pick the configured loser-tree / bubble algorithm
switch (mwma)
{
case SETTINGS::BUBBLE:
return_target = multiway_merge_bubble<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE:
return_target = multiway_merge_loser_tree<
typename loser_tree_traits<Stable, value_type, Comparator>::LT>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_loser_tree_combined<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_loser_tree_sentinel<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
assert(0 && "multiway_merge algorithm not implemented");
break;
}
}
}
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target + length, comp));
return return_target;
}
/*!
* Splitting method for parallel multi-way merge routine: use sampling and
* binary search for in-exact splitting.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param length Maximum length to merge.
* \param total_length Total length of all sequences combined.
* \param comp Comparator.
* \param chunks Output subsequences for num_threads.
* \param num_threads Number of threads to split the sequences for.
* \tparam Stable Stable merging incurs a performance penalty.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename DiffType,
typename Comparator>
void
parallel_multiway_merge_sampling_splitting(
const RandomAccessIteratorIterator& seqs_begin,
const RandomAccessIteratorIterator& seqs_end,
DiffType length, DiffType total_length, Comparator comp,
std::vector<typename std::iterator_traits<RandomAccessIteratorIterator>::value_type>* chunks,
const thread_index_t num_threads)
{
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
const DiffType num_seqs = seqs_end - seqs_begin;
const DiffType num_samples = num_threads * SETTINGS::merge_oversampling;
// pick samples: num_samples per sequence, spread evenly over the prefix of
// each sequence that is expected to take part in the merge (positions are
// scaled by length / total_length)
value_type* samples = new value_type[num_seqs * num_samples];
for (DiffType s = 0; s < num_seqs; ++s)
{
for (DiffType i = 0; i < num_samples; ++i)
{
DiffType sample_index = static_cast<DiffType>(
double(iterpair_size(seqs_begin[s]))
* (double(i + 1) / double(num_samples + 1))
* (double(length) / double(total_length))
);
samples[s * num_samples + i] = seqs_begin[s].first[sample_index];
}
}
// sort the pooled samples; NOTE(review): stable sort presumably preserves
// sequence order among equal samples for stable merging — confirm
if (Stable)
std::stable_sort(samples, samples + (num_samples * num_seqs), comp);
else
std::sort(samples, samples + (num_samples * num_seqs), comp);
// for each processor
for (thread_index_t slab = 0; slab < num_threads; ++slab)
{
// for each sequence: the slab boundary is the sample at the slab's
// proportional rank, mapped back into each sequence via upper_bound
for (DiffType seq = 0; seq < num_seqs; ++seq)
{
if (slab > 0) {
chunks[slab][seq].first =
std::upper_bound(
seqs_begin[seq].first, seqs_begin[seq].second,
samples[num_samples * num_seqs * slab / num_threads],
comp);
}
else // absolute beginning
chunks[slab][seq].first = seqs_begin[seq].first;
if ((slab + 1) < num_threads) {
chunks[slab][seq].second =
std::upper_bound(
seqs_begin[seq].first, seqs_begin[seq].second,
samples[num_samples * num_seqs * (slab + 1) / num_threads],
comp);
}
else // absolute ending
chunks[slab][seq].second = seqs_begin[seq].second;
}
}
delete[] samples;
}
/*!
* Splitting method for parallel multi-way merge routine: use multisequence
* selection for exact splitting.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param length Maximum length to merge.
* \param total_length Total length of all sequences combined.
* \param comp Comparator.
* \param chunks Output subsequences for num_threads.
* \param num_threads Number of threads to split the sequences for.
* \tparam Stable Stable merging incurs a performance penalty.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename DiffType,
typename Comparator>
void
parallel_multiway_merge_exact_splitting(
const RandomAccessIteratorIterator& seqs_begin,
const RandomAccessIteratorIterator& seqs_end,
DiffType length, DiffType total_length, Comparator comp,
std::vector<typename std::iterator_traits<RandomAccessIteratorIterator>::value_type>* chunks,
const thread_index_t num_threads)
{
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
typedef typename RandomAccessIteratorPair
::first_type RandomAccessIterator;
const size_t num_seqs = seqs_end - seqs_begin;
// tight: all available elements are merged, so the last chunk simply ends
// at the sequences' ends and needs no extra partitioning step
const bool tight = (total_length == length);
// offsets[s] holds, per sequence, the split point after which thread s + 1
// starts merging
std::vector<RandomAccessIterator>* offsets
= new std::vector<RandomAccessIterator>[num_threads];
std::vector<DiffType> ranks(num_threads + 1);
equally_split(length, num_threads, ranks.begin());
for (thread_index_t s = 0; s < (num_threads - 1); ++s)
{
offsets[s].resize(num_seqs);
multiseq_partition(seqs_begin, seqs_end,
ranks[s + 1], offsets[s].begin(), comp);
}
// The last split point is also needed (and available) when not merging
// everything. Bug fix: this block used to sit inside the loop above, which
// redundantly recomputed it num_threads - 1 times and — worse — never
// computed it at all for num_threads == 1, so the chunk loop below read
// the empty offsets[0] out of bounds in the !tight case.
if (!tight)
{
offsets[num_threads - 1].resize(num_seqs);
multiseq_partition(seqs_begin, seqs_end,
length, offsets[num_threads - 1].begin(), comp);
}
// for each processor
for (thread_index_t slab = 0; slab < num_threads; ++slab)
{
// for each sequence
for (size_t s = 0; s < num_seqs; ++s)
{
if (slab == 0) // absolute beginning
chunks[slab][s].first = seqs_begin[s].first;
else
chunks[slab][s].first = offsets[slab - 1][s];
if (!tight || slab < (num_threads - 1))
chunks[slab][s].second = offsets[slab][s];
else // slab == num_threads - 1
chunks[slab][s].second = seqs_begin[s].second;
}
}
delete[] offsets;
}
#if STXXL_PARALLEL
/*!
* Parallel multi-way merge routine.
*
* The decision is based on the branching factor and runtime settings.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \return End iterator of output sequence.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType,
typename Comparator>
RandomAccessIterator3
parallel_multiway_merge(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, const DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
// every input sequence must already be sorted
for (RandomAccessIteratorIterator rii = seqs_begin; rii != seqs_end; ++rii)
STXXL_DEBUG_ASSERT(stxxl::is_sorted((*rii).first, (*rii).second, comp));
// leave only non-empty sequences
std::vector<RandomAccessIteratorPair> seqs_ne;
seqs_ne.reserve(seqs_end - seqs_begin);
DiffType total_length = 0;
for (RandomAccessIteratorIterator raii = seqs_begin; raii != seqs_end; ++raii)
{
// renamed from 'length' to avoid shadowing the function parameter
const DiffType seq_length = iterpair_size(*raii);
if (seq_length > 0) {
total_length += seq_length;
seqs_ne.push_back(*raii);
}
}
size_t num_seqs = seqs_ne.size();
STXXL_PARALLEL_PCALL(total_length);
if (total_length == 0 || num_seqs == 0)
return target;
// never use more threads than elements to merge
thread_index_t num_threads = static_cast<thread_index_t>(
std::min(static_cast<DiffType>(SETTINGS::num_threads), total_length));
Timing<inactive_tag>* t = new Timing<inactive_tag>[num_threads];
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
// thread iam will have to merge chunks[iam][0..num_seqs - 1]
std::vector<RandomAccessIteratorPair>* chunks
= new std::vector<RandomAccessIteratorPair>[num_threads];
for (int s = 0; s < num_threads; ++s)
chunks[s].resize(num_seqs);
#pragma omp parallel num_threads(num_threads)
{
// one thread computes the splitting for all; the others wait at the
// implicit barrier at the end of the single construct
#pragma omp single
{
if (SETTINGS::multiway_merge_splitting == SETTINGS::SAMPLING)
{
parallel_multiway_merge_sampling_splitting<Stable>(
seqs_ne.begin(), seqs_ne.end(),
length, total_length, comp,
chunks, num_threads);
}
else // (SETTINGS::multiway_merge_splitting == SETTINGS::EXACT)
{
parallel_multiway_merge_exact_splitting<Stable>(
seqs_ne.begin(), seqs_ne.end(),
length, total_length, comp,
chunks, num_threads);
}
}
thread_index_t iam = omp_get_thread_num();
t[iam].tic();
// compute this thread's offset in the output and how many elements it merges
DiffType target_position = 0, local_length = 0;
for (size_t s = 0; s < num_seqs; ++s)
{
target_position += chunks[iam][s].first - seqs_ne[s].first;
local_length += iterpair_size(chunks[iam][s]);
}
sequential_multiway_merge<Stable, false>(
chunks[iam].begin(), chunks[iam].end(),
target + target_position,
std::min(local_length, length - target_position),
comp);
t[iam].tic();
}
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target + length, comp));
// update ends of sequences: advance each non-empty input to where the
// last chunk (the one farthest into the data) stopped
size_t count_seqs = 0;
for (RandomAccessIteratorIterator raii = seqs_begin; raii != seqs_end; ++raii)
{
// renamed from 'length' to avoid shadowing the function parameter
const DiffType seq_length = iterpair_size(*raii);
if (seq_length > 0)
raii->first = chunks[num_threads - 1][count_seqs++].second;
}
STXXL_DEBUG_ASSERT(count_seqs == num_seqs);
delete[] chunks;
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
for (int pr = 0; pr < num_threads; ++pr)
t[pr].print();
delete[] t;
return target + length;
}
/*!
* Multi-way merging front-end, unstable mode, no sentinels.
*
* Dispatches to the parallel implementation when there are enough sequences
* and enough elements, otherwise merges sequentially.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
// no input sequences: nothing to merge
if (seqs_begin == seqs_end)
return target;
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
return parallel_multiway_merge<false>(
seqs_begin, seqs_end, target, length, comp);
else
return sequential_multiway_merge<false, false>(
seqs_begin, seqs_end, target, length, comp);
}
/*!
* Multi-way merging front-end, stable mode, no sentinels.
*
* Dispatches to the parallel implementation when there are enough sequences
* and enough elements, otherwise merges sequentially.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_stable(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
// no input sequences: nothing to merge
if (seqs_begin == seqs_end)
return target;
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
return parallel_multiway_merge<true>(
seqs_begin, seqs_end, target, length, comp);
else
return sequential_multiway_merge<true, false>(
seqs_begin, seqs_end, target, length, comp);
}
/*!
* Multi-way merging front-end, unstable mode, with sentinels.
*
* Each sequence must be suffixed with a sentinel as *end(), one item beyond
* the end of each sequence.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
* \pre For each \c i, \c seqs_begin[i].second must be the end marker of the
* sequence, but also reference the one more sentinel element.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_sentinels(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
// no input sequences: nothing to merge
if (seqs_begin == seqs_end)
return target;
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
RandomAccessIterator3 target_end;
// parallel merge only pays off for enough sequences and elements
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
target_end = parallel_multiway_merge<false>(
seqs_begin, seqs_end, target, length, comp);
else
target_end = sequential_multiway_merge<false, true>(
seqs_begin, seqs_end, target, length, comp);
return target_end;
}
/*!
* Multi-way merging front-end, stable mode, with sentinels.
*
* Each sequence must be suffixed with a sentinel as *end(), one item beyond
* the end of each sequence.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator out output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
* \pre For each \c i, \c seqs_begin[i].second must be the end marker of the
* sequence, but also reference the one more sentinel element.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_stable_sentinels(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
// no input sequences: nothing to merge
if (seqs_begin == seqs_end)
return target;
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
RandomAccessIterator3 target_end;
// parallel merge only pays off for enough sequences and elements
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
target_end = parallel_multiway_merge<true>(
seqs_begin, seqs_end, target, length, comp);
else
target_end = sequential_multiway_merge<true, true>(
seqs_begin, seqs_end, target, length, comp);
return target_end;
}
#endif // STXXL_PARALLEL
} // namespace parallel
STXXL_END_NAMESPACE
#endif // !STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
|
GB_unop__identity_int16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_bool)
// op(A') function: GB (_unop_tran__identity_int16_bool)
// C type: int16_t
// A type: bool
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
// A matrix entry type
#define GB_ATYPE \
bool
// C matrix entry type
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// access the p-th entry of C's value array
#define GB_CX(p) Cx [p]
// unary operator (identity)
#define GB_OP(z, x) \
z = x ;
// casting (bool to int16_t)
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij): fetch, cast, apply, and store in one step
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator elementwise: Cx [p] = (int16_t) Ax [p].
// Returns GrB_NO_VALUE when the operator is disabled at compile time.
GrB_Info GB (_unop_apply__identity_int16_bool)
(
int16_t *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// loop index declared before the loops so the OpenMP 'for' pragmas apply
int64_t p ;
if (Ab == NULL)
{
// no bitmap: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-workspace buffers (see GB_unop_transpose.c)
const int64_t *restrict A_slice, // partition of A across threads (see GB_unop_transpose.c)
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the actual transpose/apply loops are shared across all generated
// operators and are textually included here
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute RESULT = X - Y for `struct timeval' values.
 *
 * Note: Y is modified in the process (its fields are renormalized so the
 * microsecond difference comes out in range).
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow seconds into y's microseconds when x has fewer microseconds. */
if (x->tv_usec < y->tv_usec) {
int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * carry;
y->tv_sec += carry;
}
/* Move excess microseconds in the difference up into seconds. */
if (x->tv_usec - y->tv_usec > 1000000) {
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* With y normalized, tv_usec of the difference is certainly positive. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Negative iff x ended up with fewer whole seconds than (adjusted) y. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-14,16)),ceild(3*t1-30,32)),ceild(24*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(12*t1+Nx+15,128)),floord(24*t2+Nx+11,128)),floord(16*t3+Nx+3,128)),floord(24*t1-24*t2+Nz+Nx+13,128));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),32*t4+30);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
omp_reduce.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <complex.h>
#include "config.h"
/*
 * Elementwise in-place sum reduction over per-thread vectors.
 *
 * vec[t] points to the buffer owned by OpenMP thread t; each buffer holds
 * `count` doubles.  Buffers are combined pairwise in a binary tree
 * (ceil(log2(nthreads)) rounds), so on return vec[0] holds the elementwise
 * sum of all threads' buffers.  Buffers of other threads are consumed as
 * scratch during the reduction.
 *
 * NOTE(review): intended to be called by every thread of the active OpenMP
 * parallel team — it uses omp_get_thread_num()/omp_get_num_threads() and
 * relies on the `#pragma omp barrier`s below for correctness.
 */
void NPomp_dsum_reduce_inplace(double **vec, size_t count)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        unsigned int bit, thread_src;
        unsigned int mask = 0;
        double *dst = vec[thread_id];
        double *src;
        size_t i;
        /* Ensure every thread has finished writing its own buffer
         * before any thread reads a partner's buffer. */
#pragma omp barrier
        for (bit = 0; (1<<bit) < nthreads; bit++) {
                mask |= 1 << bit;
                /* Only threads whose low (bit+1) id bits are all zero
                 * survive this round; each absorbs the partner whose id
                 * differs only in `bit`. */
                if (!(thread_id & mask)) {
                        thread_src = thread_id | (1<<bit);
                        if (thread_src < nthreads) {
                                src = vec[thread_src];
                                for (i = 0; i < count; i++) {
                                        dst[i] += src[i];
                                }
                        }
                }
                /* All merges of this round must complete before the next
                 * round reads the freshly merged buffers. */
#pragma omp barrier
        }
}
void NPomp_dprod_reduce_inplace(double **vec, size_t count)
{
unsigned int nthreads = omp_get_num_threads();
unsigned int thread_id = omp_get_thread_num();
unsigned int bit, thread_src;
unsigned int mask = 0;
double *dst = vec[thread_id];
double *src;
size_t i;
#pragma omp barrier
for (bit = 0; (1<<bit) < nthreads; bit++) {
mask |= 1 << bit;
if (!(thread_id & mask)) {
thread_src = thread_id | (1<<bit);
if (thread_src < nthreads) {
src = vec[thread_src];
for (i = 0; i < count; i++) {
dst[i] *= src[i];
}
}
}
#pragma omp barrier
}
}
/*
 * Elementwise in-place sum reduction over per-thread complex vectors.
 *
 * Identical tree-reduction scheme to NPomp_dsum_reduce_inplace, but for
 * `double complex` buffers: vec[t] is the buffer of OpenMP thread t
 * (length `count`), and on return vec[0] holds the elementwise sum of
 * all threads' buffers.
 *
 * NOTE(review): must be called by every thread of the active OpenMP
 * parallel team; correctness depends on the barriers below.
 */
void NPomp_zsum_reduce_inplace(double complex **vec, size_t count)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        unsigned int bit, thread_src;
        unsigned int mask = 0;
        double complex *dst = vec[thread_id];
        double complex *src;
        size_t i;
        /* Ensure all per-thread buffers are fully written first. */
#pragma omp barrier
        for (bit = 0; (1<<bit) < nthreads; bit++) {
                mask |= 1 << bit;
                /* Surviving threads absorb the partner differing in `bit`. */
                if (!(thread_id & mask)) {
                        thread_src = thread_id | (1<<bit);
                        if (thread_src < nthreads) {
                                src = vec[thread_src];
                                for (i = 0; i < count; i++) {
                                        dst[i] += src[i];
                                }
                        }
                }
                /* Finish this round before the next reads merged buffers. */
#pragma omp barrier
        }
}
void NPomp_zprod_reduce_inplace(double complex **vec, size_t count)
{
unsigned int nthreads = omp_get_num_threads();
unsigned int thread_id = omp_get_thread_num();
unsigned int bit, thread_src;
unsigned int mask = 0;
double complex *dst = vec[thread_id];
double complex *src;
size_t i;
#pragma omp barrier
for (bit = 0; (1<<bit) < nthreads; bit++) {
mask |= 1 << bit;
if (!(thread_id & mask)) {
thread_src = thread_id | (1<<bit);
if (thread_src < nthreads) {
src = vec[thread_src];
for (i = 0; i < count; i++) {
dst[i] *= src[i];
}
}
}
#pragma omp barrier
}
}
#ifdef _OPENMP
/* Upper bound on the number of threads a parallel region would use
 * (thin wrapper around omp_get_max_threads). */
int get_omp_threads() {
        return omp_get_max_threads();
}
/* Set the default thread count for subsequent parallel regions.
 * Returns the requested count n (not the previous setting). */
int set_omp_threads(int n) {
        omp_set_num_threads(n);
        return n;
}
#else
// mimic omp_get_max_threads omp_set_num_threads function of libgomp
// (serial build: always one thread; setting the count is a no-op
// signalled by returning 0).
int get_omp_threads() { return 1; }
int set_omp_threads(int n) { return 0; }
#endif
|
degree.h | #pragma once
#ifndef DEGREEORDERCSR_H
#define DEGREEORDERCSR_H
#include "../general.h"
#include <cstdlib>
#include <functional>
namespace PpParallel
{
// Helper function.
//
// This code is generic over both CGraph and SGraph.
template <class AnyGraph>
inline bool compare_degree(const AnyGraph *graph, NodeId v, NodeId w)
{
int64_t degV = graph->out_degree(v);
int64_t degW = graph->out_degree(w);
return !(degV > degW || (degV == degW && v > w));
}
// This code is generic over both CGraph and SGraph.
	// Compute a degree ordering of the graph's nodes into `res`.
	//
	// With useRankFormat == false (default), `res` becomes a permutation of
	// node ids sorted by compare_degree (Order-Format: res[k] is the node
	// at sorted position k).  With useRankFormat == true, `res` is the
	// inverse permutation (Rank-Format: res[v] is the sorted position of
	// node v).
	//
	// Uses __gnu_parallel::sort when built with OpenMP, std::sort otherwise.
	// NOTE(review): std::iota requires <numeric>; presumably pulled in via
	// ../general.h — confirm.
	template <class AnyGraph, bool useRankFormat = false, class Output = std::vector<NodeId>>
	void getDegreeOrdering(const AnyGraph &graph, Output &res)
	{
		using namespace std::placeholders;
		auto n = graph.num_nodes();
		res.resize(n);
		if constexpr (useRankFormat)
		{
			//Produce result in Rank-Format
			// Sort a temporary identity permutation, then invert it
			// into `res` in parallel.
			std::vector<NodeId> temp(n);
			std::iota(temp.begin(), temp.end(), 0);
#ifdef _OPENMP
			__gnu_parallel::sort(temp.begin(), temp.end(), std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#else
			std::sort(temp.begin(), temp.end(), std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#endif
			// NOTE(review): loop index is `int` while n may be a wider
			// type — fine for graphs under 2^31 nodes; confirm.
#pragma omp parallel for schedule(static, 64)
			for (int i = 0; i < n; i++)
				res[temp[i]] = i;
		}
		else
		{
			//Produce result in Order-Format
			std::iota(res.begin(), res.end(), 0);
#ifdef _OPENMP
			__gnu_parallel::sort(res.begin(), res.end(), std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#else
			std::sort(res.begin(), res.end(), std::bind(compare_degree<AnyGraph>, &graph, _1, _2));
#endif
		}
	}
} // namespace PpParallel
#endif |
mixing_time.h | /*
* Created on: Sep 23, 2016
* Author: Steffen Rechner <steffen.rechner@informatik.uni-halle.de>
*
* This file is part of the marathon software.
*
* Copyright (c) 2016, Steffen Rechner
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PROJECT_MIXINGTIME_H
#define PROJECT_MIXINGTIME_H
#include "variation_distance.h"
#include "state_graph.h"
#include "transition_matrix.h"
namespace marathon {
/**
* A class for the computation of total mixing time and its bounds.
* @tparam T
*/
template<class T=double>
class MixingTimeCalculator {
protected:
const MarkovChain &mc; // markov chain
const StateGraph &sg; // state graph
const size_t omega; // number of states
const std::vector<T> stationary; // stationary distribution
std::vector<T> initStationary() const {
// construct stationary distribution as array
std::vector<T> pi(omega);
const Rational Z = sg.getNormalizingConstant();
for (size_t i = 0; i < omega; i++)
pi[i] = (sg.getWeight(i) / Z).convert_to<T>();
return pi;
}
public:
/**
* Calculate a Mixing Time Calculator.
* @param sg State Graph.
*/
explicit MixingTimeCalculator(const StateGraph &sg) :
mc(sg.getMarkovChain()),
sg(sg),
omega(sg.getNumStates()),
stationary(initStationary()) {
}
/**
* Determine the mixing time of state graph sg while starting with a probability vector p_start.
* @tparam T One of the following: float, double, Rational.
* @param p_start Initial probability distribution.
* @param eps Threshold parameter.
* @return The minimal number of steps t such that ||P^t * p_start - pi|| < eps.
*/
uint mixingTime(const std::vector<T> &p_start, double eps) {
// check trivial cases
if (omega == 0)
throw std::runtime_error("Error! Empty state space!");
else if (omega == 1)
return 0;
// store a single row of the transition matrices P^t and P^t+1
std::vector<T> curr(p_start);
std::vector<T> next(omega);
uint32_t t = 0; // the exponent of P^t
// d is variation distance between P^t[i,] and pi
T d = variationDistance<T>(curr, stationary);
while (d >= eps) {
// for each entry of the probability vector
for (size_t j = 0; j < omega; j++) {
// simulate a single step of matrix multiplication
T x(0);
// for each transition (i,j)
for (Transition *kj : sg.getInArcs(j)) {
const size_t k = kj->from;
const Rational pkj = kj->weight;
x += curr[k] * pkj.convert_to<T>();
}
next[j] = x;
}
std::swap(next, curr);
d = variationDistance<T>(curr, stationary);
//printf("%i: %f\n", t, d);
t++;
}
return t;
}
/**
* Determine the mixing time of state graph sg while starting at state i.
* @tparam T One of the following: float, double, Rational.
* @param i Index of initial state.
* @param eps Threshold parameter.
* @return The minimal number of steps t such that ||P^t[i,] - pi|| < eps.
*/
uint mixingTime(const size_t i, const double eps) {
if (omega == 0 || i < 0 || i >= omega)
throw std::runtime_error("Error! Invalid state index!");
// p_start is initialized as the i-th row of the unit matrix
std::vector<T> p_start(omega, 0);
p_start[i] = T(1);
return mixingTime(p_start, eps);
}
/**
* Determine the maximal mixing time of the states in range [from,to].
* @param begin Index of first state to be considered in calculations.
* @param end Index of first state not to be considered in calculations.
* @param eps Distance to stationary distribution.
* @return The smallest number of steps t until max_{i=begin..end-1}(||P^t_i-pi|| < eps)
*/
uint mixingTime(size_t begin, size_t end, double eps) {
// check trivial cases
if (omega == 0)
throw std::runtime_error("Error! State space is empty!");
else if (omega == 1)
return 0;
uint t_max = 0;
// for each row of the transition matrix P
#pragma omp parallel for
for (size_t i = std::max(begin, 0lu); i < std::min(end, omega); i++) {
// determine mixing time while starting with state i
uint t = mixingTime(i, eps);
#pragma omp critical
t_max = std::max(t, t_max);
}
return t_max;
}
/**
* Determine the total mixing time t of a state graph.
* The total mixing time is defined as the smallest integer t,
* such that max_{i=1..V} ||P^t[i,] - pi|| < eps, where P is
* the transition matrix of a Markov chain, V is its number of
* states and pi is its stationary distribution.
* Running Time: O(t*V^3), where V is the number of states.
* Memory Complexity: O(V).
* @param eps Distance to stationary distribution.
* @return The smallest number of steps t until max_{i=1..omega}(||P^t_i-pi|| < eps)
*/
uint totalMixingTime(const double eps) {
return mixingTime(0, sg.getNumStates(), eps);
}
/**
* Determines the total mixing time t of a state graph.
* The total mixing time is defined as the smallest integer t,
* such that max_{i=1..V} ||P^t[i,] - pi|| < eps, where P is
* the transition matrix of a Markov chain, V is its number of
* states and pi is its stationary distribution.
* Running Time: O(t*V^3), where V is the number of states.
* Memory Complexity: O(V^2), where V is the number of states.
* @tparam T One of the following: float, double, Rational.
* @param sg Pointer to a state graph object.
* @param eps The distance to stationary distribution.
* @return
*/
uint totalMixingTimeDense(const double eps) {
/**********************************************************************
* This implementation searches the total mixing time of mc by a two
* step procedure. Starting with matrix M=P, it squares M until
* dist(M,pi) < eps. By doing so, the method computes the power of
* two r, such that dist(P^r,pi) <= eps and l = r/2 such that
* dist(P^l, pi) > eps.
*
* After finding the limits l and r, it uses binary search for finding
* the smallest t such that dist(P^t,pi) < eps.
*
* In each step of binary search, the boundaries l and r are
* transformed. To compute P^m with m=(l+r)/2 we first compute P^(m-l)
* and multiply the result to P^l. By this, we need three temporary
* matrices.
*********************************************************************/
// check trivial cases
if (omega == 0)
throw std::runtime_error("Error! Empty state space!");
else if (omega == 1)
return 0;
const TransitionMatrix<T> P(sg); // transition matrix
// Phase 1: Search for smallest k such that ||P^(2^k) - pi|| < eps
uint k = 0;
TransitionMatrix<T> A(P); // invariant A = P^(2^k)
while (totalVariationDistance(A, stationary) >= eps) {
A = A * A; // A = P^(2^(k+1))
k++; // invariant restored
}
if (k == 0)
return 1;
// it holds: ||P^(2^k) - pi|| < eps <= ||P^(2^(k-1)) - pi||
uint l = 1u << (k - 1); // l = 2^(k-1)
uint u = 1u << k; // u = 2^k
// Phase 2: Binary search
// invariant: ||P^u - pi|| < eps <= ||P^l - pi||
while (u - l > 1) {
uint m = (l + u) / 2;
A = P.pow(m); // A = P^m
T d = totalVariationDistance(A, stationary);
if (d < eps) {
u = m;
} else {
l = m;
}
}
return u;
}
};
}
#endif //PROJECT_MIXINGTIME_H
|
Updater.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _UPDATER_
#define _UPDATER_
#include "../DatapointPartitions/DatapointPartitions.h"
#include "../Gradient/Gradient.h"
// Some macros to declare extra thread-local / global 1d/2d vectors.
// This avoids the use of std::maps, which are very inefficient.
// Gives around a 2-3x speedup over using maps.
// Per-thread registers: NAME_LOCAL_ is a vector indexed by thread id,
// holding one 1d (or 2d) double vector per thread.
#define REGISTER_THREAD_LOCAL_1D_VECTOR(NAME) std::vector<std::vector<double> > NAME ## _LOCAL_
#define REGISTER_THREAD_LOCAL_2D_VECTOR(NAME) std::vector<std::vector<std::vector<double> > > NAME ## _LOCAL_
// Size the per-thread storage: FLAGS_n_threads outer slots, each
// zero-initialized to the requested dimensions.
#define INITIALIZE_THREAD_LOCAL_1D_VECTOR(NAME, N_COLUMNS) {NAME##_LOCAL_.resize(FLAGS_n_threads); for (int i = 0; i < FLAGS_n_threads; i++) NAME ## _LOCAL_[i].resize(N_COLUMNS, 0);}
#define INITIALIZE_THREAD_LOCAL_2D_VECTOR(NAME, N_ROWS, N_COLUMNS) {NAME##_LOCAL_.resize(FLAGS_n_threads); for (int i = 0; i < FLAGS_n_threads; i++) NAME ## _LOCAL_[i].resize(N_ROWS, std::vector<double>(N_COLUMNS, 0));}
// Access the calling thread's slot (valid only inside an OpenMP region
// with thread ids < FLAGS_n_threads).
#define GET_THREAD_LOCAL_VECTOR(NAME) NAME ## _LOCAL_[omp_get_thread_num()]
// Shared (global) 1d/2d registers, zero-initialized on INITIALIZE.
#define REGISTER_GLOBAL_1D_VECTOR(NAME) std::vector<double> NAME ## _GLOBAL_
#define REGISTER_GLOBAL_2D_VECTOR(NAME) std::vector<std::vector<double> > NAME ## _GLOBAL_
#define INITIALIZE_GLOBAL_1D_VECTOR(NAME, N_COLUMNS) {NAME ## _GLOBAL_.resize(N_COLUMNS, 0);}
#define INITIALIZE_GLOBAL_2D_VECTOR(NAME, N_ROWS, N_COLUMNS) {NAME ## _GLOBAL_.resize(N_ROWS, std::vector<double>(N_COLUMNS, 0));}
#define GET_GLOBAL_VECTOR(NAME) NAME ## _GLOBAL_
// Per-thread scalar register: one double per thread, zeroed on INITIALIZE.
#define REGISTER_THREAD_LOCAL_DOUBLE(NAME) std::vector<double > NAME ## _LOCAL_
#define INITIALIZE_THREAD_LOCAL_DOUBLE(NAME) {NAME##_LOCAL_.resize(FLAGS_n_threads); std::fill(NAME##_LOCAL_.begin(), NAME##_LOCAL_.end(), 0);}
class Updater {
protected:
// Keep a reference of the model and datapoints, and partition ordering.
Model *model;
std::vector<Datapoint *> datapoints;
DatapointPartitions *datapoint_partitions;
// Have an array of Gradient objects (stores extra info for Model processing).
// Have 1 per thread to avoid conflicts.
Gradient *thread_gradients;
std::vector<int> bookkeeping;
// A reference to all_coordinates, which indexes all the coordinates of the model.
std::vector<int> all_coordinates;
// H, Nu and Mu for updates.
virtual double H(int coordinate, int index_into_coordinate_vector) = 0;
virtual double Nu(int coordinate, int index_into_coordinate_vector) = 0;
virtual double Mu(int coordinate) = 0;
// After calling PrepareNu/Mu/H, for the given coordinates, we expect that
// calls to Nu/Mu/H are ready.
virtual void PrepareNu(std::vector<int> &coordinates) = 0;
virtual void PrepareMu(std::vector<int> &coordinates) = 0;
virtual void PrepareH(Datapoint *datapoint, Gradient *g) = 0;
// By default need catch up.
virtual bool NeedCatchUp() {
return true;
}
virtual void ApplyGradient(Datapoint *datapoint) {
std::vector<double> &model_data = model->ModelData();
int coordinate_size = model->CoordinateSize();
for (int i = 0; i < datapoint->GetCoordinates().size(); i++) {
int index = datapoint->GetCoordinates()[i];
double mu = Mu(index);
for (int j = 0; j < coordinate_size; j++) {
model_data[index * coordinate_size + j] = (1 - mu) * model_data[index * coordinate_size + j]
- Nu(index, j)
+ H(index, j);
}
}
}
virtual void CatchUp(int index, int diff) {
if (!NeedCatchUp()) return;
if (diff < 0) diff = 0;
double geom_sum = 0;
double mu = Mu(index);
if (mu != 0) {
geom_sum = ((1 - pow(1 - mu, diff+1)) / (1 - (1 - mu))) - 1;
}
for (int j = 0; j < model->CoordinateSize(); j++) {
model->ModelData()[index * model->CoordinateSize() + j] =
pow(1 - mu, diff) * model->ModelData()[index * model->CoordinateSize() + j]
- Nu(index, j) * geom_sum;
}
}
virtual void CatchUpDatapoint(Datapoint *datapoint) {
std::vector<double> &model_data = model->ModelData();
int coordinate_size = model->CoordinateSize();
for (int i = 0; i < datapoint->GetCoordinates().size(); i++) {
int index = datapoint->GetCoordinates()[i];
int diff = datapoint->GetOrder() - bookkeeping[index] - 1;
CatchUp(index, diff);
}
}
virtual void FinalCatchUp() {
int coordinate_size = model->CoordinateSize();
std::vector<double> &model_data = model->ModelData();
#pragma omp parallel num_threads(FLAGS_n_threads)
{
PrepareNu(all_coordinates);
PrepareMu(all_coordinates);
#pragma omp for
for (int i = 0; i < model->NumParameters(); i++) {
int diff = model->NumParameters() - bookkeeping[i];
CatchUp(i, diff);
}
}
}
public:
Updater(Model *model, std::vector<Datapoint *> &datapoints) {
// Create gradients for each thread.
thread_gradients = new Gradient[FLAGS_n_threads];
for (int thread = 0; thread < FLAGS_n_threads; thread++) {
thread_gradients[thread] = Gradient();
}
this->model = model;
// Set up bookkeping.
this->datapoints = datapoints;
for (int i = 0; i < model->NumParameters(); i++) {
bookkeeping.push_back(0);
}
// Keep an array that has integers 1...n_coords.
for (int i = 0; i < model->NumParameters(); i++) {
all_coordinates.push_back(i);
}
}
Updater() {}
virtual ~Updater() {
delete [] thread_gradients;
}
// Could be useful to get partitioning info.
virtual void SetUpWithPartitions(DatapointPartitions &partitions) {
datapoint_partitions = &partitions;
}
// Main update method, which is run by multiple threads.
virtual void Update(Model *model, Datapoint *datapoint) {
int thread_num = omp_get_thread_num();
thread_gradients[thread_num].Clear();
thread_gradients[thread_num].datapoint = datapoint;
// First prepare Nu and Mu for catchup since they are independent of the the model.
PrepareNu(datapoint->GetCoordinates());
PrepareMu(datapoint->GetCoordinates());
CatchUpDatapoint(datapoint);
// After catching up, prepare H and apply the gradient.
PrepareH(datapoint, &thread_gradients[thread_num]);
ApplyGradient(datapoint);
// Update bookkeeping.
for (const auto &coordinate : datapoint->GetCoordinates()) {
bookkeeping[coordinate] = datapoint->GetOrder();
}
}
// Called before epoch begins.
virtual void EpochBegin() {
}
// Called when the epoch ends.
virtual void EpochFinish() {
FinalCatchUp();
std::fill(bookkeeping.begin(), bookkeeping.end(), 0);
}
};
#endif
|
parallel-pi.c | #include <stdio.h>
/*
 * Approximate pi as the left-endpoint Riemann sum of
 * integral_0^1 4/(1+x^2) dx with N = 1000 sample points, accumulated
 * with an OpenMP parallel-for sum reduction, then print the result.
 */
int main(int argc, char const *argv[])
{
    const int N = 1000;
    double acc = 0.0;
    double x;
    int i;

#pragma omp parallel for private(x) reduction(+:acc)
    for (i = 0; i < N; i++) {
        x = (double)i / N;
        acc += 4 / (1 + x * x);
    }

    acc = acc / N;
    printf("%f\n", acc);
    return 0;
}
logger.h | /*
* logger.h
*
* Created on: 2011/04/11
* Author: shu
*/
#ifndef LOGGER_H_
#define LOGGER_H_
#include <iostream>
#include <ostream>
#include <string>
// Process-wide logging singleton that writes messages to stdout.
// Not copyable; obtain the shared instance via GetInstance().
class Logger {
public:
  // Returns the single shared instance (constructed on first use).
  static Logger* GetInstance() {
    static Logger instance;
    return &instance;
  }

  // Log a message at error severity.
  void ErrorLog(std::string message) {
    //#pragma omp critical(lock_)
    Emit("error : ", message);
  }

  // Log a message at warning severity.
  void WarningLog(std::string message) {
    //#pragma omp critical(lock_)
    Emit("warning : ", message);
  }

  // Log a plain message with no severity prefix.
  void Log(std::string message) {
    //#pragma omp critical(lock_)
    Emit("", message);
  }

private:
  // Shared output path for every severity level.
  void Emit(const std::string &prefix, const std::string &message) {
    std::cout << prefix << message << std::endl;
  }

  Logger() {
  }

  ~Logger() {
  }

  // Copying is disabled: declared but intentionally never defined.
  Logger(const Logger& rhs);
  Logger operator=(const Logger& rhs);
};
#endif /* LOGGER_H_ */
|
draw.c | #include "oilchange.h"
/* Query the currently bound framebuffer's status.  Returns 0 when it is
 * complete; otherwise prints a diagnostic and returns 1. */
int is_framebuffer_incomplete()
{
	const int status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
	if (status == GL_FRAMEBUFFER_COMPLETE)
		return 0;
	if (status == GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT)
		printf("framebuffer status: %d incomplete attachment\n", status);
	else if (status == GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT)
		printf("framebuffer status: %d missing attachment\n", status);
	else if (status == GL_FRAMEBUFFER_UNSUPPORTED)
		printf("framebuffer status: %d unsupported\n", status);
	else
		printf("framebuffer status: %d (unknown)\n", status);
	return 1;
}
//draw everything in the game on the screen
void draw_stuff()
{
float identityM[] = {
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
};
float shadow_space[16];
glDisable(GL_MULTISAMPLE);
float modelM[16];
memcpy(modelM, identityM, sizeof identityM);
// make shadow map
if (shadow_mapping)
{
glBindFramebuffer(GL_FRAMEBUFFER, shadow_fbo);
if (is_framebuffer_incomplete()) goto fb_is_bad;
glViewport(0, 0, SHADOW_SZ, SHADOW_SZ);
glClear(GL_DEPTH_BUFFER_BIT);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(4.f, 4.f);
//render shadows here
glUseProgram(shadow_prog_id);
// view matrix
float viewM[16];
float f[3];
float moon_pitch = sun_pitch + PI;
if (moon_pitch < 0) moon_pitch += TAU;
float yaw = 3.1415926535 * -0.5f;
sun_pos.x = 100;
sun_pos.y = 100;
sun_pos.z = 100;
float pitch = -0.8f;
lookit(viewM, f, sun_pos.x, sun_pos.y, sun_pos.z, pitch, yaw);
translate(viewM, -sun_pos.x, -sun_pos.y, -sun_pos.z);
// proj matrix
float snear = 10.f; // TODO find closest possible block
float sfar = 10.f + 9000.f;
float x = 1.f / (6000 / 2.f);
float y = -1.f / (6000 / 2.f);
float z = -1.f / ((sfar - snear) / 2.f);
float tz = -(sfar + snear) / (sfar - snear);
float orthoM[] = {
x, 0, 0, 0,
0, y, 0, 0,
0, 0, z, 0,
0, 0, tz, 1,
};
float shadow_pvM[16];
if (!lock_culling)
mat4_multiply(shadow_pvM, orthoM, viewM);
glUniformMatrix4fv(glGetUniformLocation(shadow_prog_id, "proj"), 1, GL_FALSE, orthoM);
glUniformMatrix4fv(glGetUniformLocation(shadow_prog_id, "view"), 1, GL_FALSE, viewM);
glUniform1i(glGetUniformLocation(shadow_prog_id, "tarray"), 0);
glUniform1f(glGetUniformLocation(shadow_prog_id, "BS"), BS);
float biasM[] = {
0.5, 0, 0, 1,
0, 0.5, 0, 1,
0, 0, 0.5, 1,
0.5, 0.5, 0.5, 1,
};
float tmpM[16];
mat4_multiply(tmpM, orthoM, viewM);
mat4_multiply(shadow_space, biasM, tmpM);
for (int i = 0; i < VAOW; i++) for (int j = 0; j < VAOD; j++)
{
if (!VBOLEN_(i, j)) continue;
if (!frustum_culling || 1)
{
glBindVertexArray(VAO_(i, j));
modelM[12] = i * BS * CHUNKW;
modelM[14] = j * BS * CHUNKD;
glUniformMatrix4fv(glGetUniformLocation(shadow_prog_id, "model"), 1, GL_FALSE, modelM);
glDrawArrays(GL_POINTS, 0, VBOLEN_(i, j));
shadow_polys += VBOLEN_(i, j);
}
}
fb_is_bad:
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDisable(GL_POLYGON_OFFSET_FILL);
}
fog_r = 0.8f;
fog_g = 0.8f;
fog_b = 0.8f;
glViewport(0, 0, screenw, screenh);
glClearColor(fog_r, fog_g, fog_b, 1.f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (antialiasing)
glEnable(GL_MULTISAMPLE);
// compute proj matrix
float near = 8.f;
float far = 99999.f;
float frustw = 4.5f * zoom_amt * screenw / screenh;
float frusth = 4.5f * zoom_amt;
float projM[] = {
near/frustw, 0, 0, 0,
0, near/frusth, 0, 0,
0, 0, -(far + near) / (far - near), -1,
0, 0, -(2.f * far * near) / (far - near), 0
};
// compute view matrix
float eye0 = lerped_pos.x + PLYR_W / 2;
float eye1 = lerped_pos.y + EYEDOWN * (camplayer.sneaking ? 2 : 1);
float eye2 = lerped_pos.z + PLYR_W / 2;
float f[3];
float viewM[16];
lookit(viewM, f, eye0, eye1, eye2, camplayer.pitch, camplayer.yaw);
sun_draw(projM, viewM, sun_pitch, shadow_tex_id);
// find where we are pointing at
rayshot(eye0, eye1, eye2, f[0], f[1], f[2]);
// translate by hand
float translated_viewM[16];
memcpy(translated_viewM, viewM, sizeof viewM);
translate(translated_viewM, -eye0, -eye1, -eye2);
static float pvM[16];
if (!lock_culling)
mat4_multiply(pvM, projM, translated_viewM);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glUseProgram(prog_id);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D_ARRAY, material_tex_id);
glUniform1i(glGetUniformLocation(prog_id, "tarray"), 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, shadow_tex_id);
glUniform1i(glGetUniformLocation(prog_id, "shadow_map"), 1);
glUniform1i(glGetUniformLocation(prog_id, "shadow_mapping"), shadow_mapping);
glUniformMatrix4fv(glGetUniformLocation(prog_id, "proj"), 1, GL_FALSE, projM);
glUniformMatrix4fv(glGetUniformLocation(prog_id, "view"), 1, GL_FALSE, translated_viewM);
glUniformMatrix4fv(glGetUniformLocation(prog_id, "shadow_space"), 1, GL_FALSE, shadow_space);
glUniform1f(glGetUniformLocation(prog_id, "BS"), BS);
if (sun_pitch < PI)
glUniform3f(glGetUniformLocation(prog_id, "light_pos"), sun_pos.x, sun_pos.y, sun_pos.z);
else
glUniform3f(glGetUniformLocation(prog_id, "light_pos"), moon_pos.x, moon_pos.y, moon_pos.z);
glUniform3f(glGetUniformLocation(prog_id, "view_pos"), eye0, eye1, eye2);
{
float m = 0.8f;
glUniform1f(glGetUniformLocation(prog_id, "sharpness"), m*m*m*(m*(m*6.f-15.f)+10.f));
float r = 0.9f;
float g = 0.9f;
float b = 0.9f;
glUniform3f(glGetUniformLocation(prog_id, "day_color"), r, g, b);
glUniform3f(glGetUniformLocation(prog_id, "glo_color"), 0.92f, 0.83f, 0.69f);
glUniform3f(glGetUniformLocation(prog_id, "fog_color"), fog_r, fog_g, fog_b);
}
// determine which chunks to send to gl
TIMER(rings)
int x0 = (eye0 - BS * CHUNKW2) / (BS * CHUNKW);
int z0 = (eye2 - BS * CHUNKW2) / (BS * CHUNKD);
CLAMP(x0, 0, VAOW - 2);
CLAMP(z0, 0, VAOD - 2);
int x1 = x0 + 1;
int z1 = z0 + 1;
int x0d = ((x0 * BS * CHUNKW + BS * CHUNKW2) - eye0);
int x1d = ((x1 * BS * CHUNKW + BS * CHUNKW2) - eye0);
int z0d = ((z0 * BS * CHUNKD + BS * CHUNKD2) - eye2);
int z1d = ((z1 * BS * CHUNKD + BS * CHUNKD2) - eye2);
// initialize with ring0 chunks
struct qitem fresh[VAOW*VAOD] = { // chunkx, distance sq, chunkz
{x0, (x0d * x0d + z0d * z0d), z0},
{x0, (x0d * x0d + z1d * z1d), z1},
{x1, (x1d * x1d + z0d * z0d), z0},
{x1, (x1d * x1d + z1d * z1d), z1}
};
size_t fresh_len = 4;
//qsort(fresh, fresh_len, sizeof(struct qitem), sorter);
#pragma omp critical
{
memcpy(fresh + fresh_len,
(struct qitem *)just_generated,
just_gen_len * sizeof *just_generated);
fresh_len += just_gen_len;
just_gen_len = 0;
}
// position within each ring that we're at this frame
static struct qitem ringpos[VAOW + VAOD] = {0};
for (int r = 1; r < VAOW + VAOD; r++)
{
// expand ring in all directions
x0--; x1++; z0--; z1++;
// freshen farther rings less and less often
if (r >= 3 && r <= 6 && frame % 2 != r % 2) continue;
if (r >= 7 && r <= 14 && frame % 4 != r % 4) continue;
if (r >= 15 && r <= 30 && frame % 8 != r % 8) continue;
if (r >= 31 && frame % 16 != r % 16) continue;
int *x = &ringpos[r].x;
int *z = &ringpos[r].z;
// move to next chunk, maybe on ring
--(*x);
// wrap around the ring
int x_too_low = (*x < x0);
if (x_too_low) { *x = x1; --(*z); }
// reset if out of the ring
int z_too_low = (*z < z0);
if (z_too_low) { *x = x1; *z = z1; }
// get out of the middle
int is_on_ring = (*z == z0 || *z == z1 || *x == x1);
if (!is_on_ring) { *x = x0; }
// render if in bounds
if (*x >= 0 && *x < VAOW && *z >= 0 && *z < VAOD)
{
fresh[fresh_len].x = *x;
fresh[fresh_len].z = *z;
fresh_len++;
}
}
// render non-fresh chunks
TIMER(drawstale)
struct qitem stale[VAOW * VAOD] = {0}; // chunkx, distance sq, chunkz
size_t stale_len = 0;
for (int i = 0; i < VAOW; i++) for (int j = 0; j < VAOD; j++)
{
// skip chunks we will draw fresh this frame
size_t limit = show_fresh_updates ? fresh_len : 4;
for (size_t k = 0; k < limit; k++)
if (fresh[k].x == i && fresh[k].z == j)
goto skip;
stale[stale_len].x = i;
stale[stale_len].z = j;
int xd = ((i * BS * CHUNKW + BS * CHUNKW2) - eye0);
int zd = ((j * BS * CHUNKD + BS * CHUNKD2) - eye2);
stale[stale_len].y = (xd * xd + zd * zd);
stale_len++;
skip: ;
}
for (size_t my = 0; my < stale_len; my++)
{
int myx = stale[my].x;
int myz = stale[my].z;
modelM[12] = myx * BS * CHUNKW;
modelM[14] = myz * BS * CHUNKD;
glUniformMatrix4fv(glGetUniformLocation(prog_id, "model"), 1, GL_FALSE, modelM);
glBindVertexArray(VAO_(myx, myz));
glDrawArrays(GL_POINTS, 0, VBOLEN_(myx, myz));
polys += VBOLEN_(myx, myz);
}
// package, ship and render fresh chunks (while the stales are rendering!)
TIMER(buildvbo);
for (size_t my = 0; my < fresh_len; my++)
{
int myx = fresh[my].x;
int myz = fresh[my].z;
int xlo = myx * CHUNKW;
int xhi = xlo + CHUNKW;
int zlo = myz * CHUNKD;
int zhi = zlo + CHUNKD;
int ungenerated = false;
#pragma omp critical
if (!AGEN_(myx, myz))
{
ungenerated = true;
}
if (ungenerated)
continue; // don't bother with ungenerated chunks
glBindVertexArray(VAO_(myx, myz));
glBindBuffer(GL_ARRAY_BUFFER, VBO_(myx, myz));
v = vbuf; // reset vertex buffer pointer
w = wbuf; // same for water buffer
TIMER(buildvbo);
for (int z = zlo; z < zhi; z++) for (int y = 0; y < TILESH; y++) for (int x = xlo; x < xhi; x++)
{
if (v >= v_limit) break; // out of vertex space, shouldnt reasonably happen
if (w >= w_limit) w -= 10; // just overwrite water if we run out of space
if (T_(x, y, z) == OPEN && (!show_light_values || !in_test_area(x, y, z)))
continue;
//lighting
float usw = CORN_(x , y , z );
float use = CORN_(x+1, y , z );
float unw = CORN_(x , y , z+1);
float une = CORN_(x+1, y , z+1);
float dsw = CORN_(x , y+1, z );
float dse = CORN_(x+1, y+1, z );
float dnw = CORN_(x , y+1, z+1);
float dne = CORN_(x+1, y+1, z+1);
float USW = KORN_(x , y , z );
float USE = KORN_(x+1, y , z );
float UNW = KORN_(x , y , z+1);
float UNE = KORN_(x+1, y , z+1);
float DSW = KORN_(x , y+1, z );
float DSE = KORN_(x+1, y+1, z );
float DNW = KORN_(x , y+1, z+1);
float DNE = KORN_(x+1, y+1, z+1);
int t = T_(x, y, z);
int m = x & (CHUNKW-1);
int n = z & (CHUNKD-1);
if (t == GRAS)
{
if (y == 0 || T_(x , y-1, z ) >= OPEN) *v++ = (struct vbufv){ 0, UP, m, y, n, usw, use, unw, une, USW, USE, UNW, UNE, 1 };
if (z == 0 || T_(x , y , z-1) >= OPEN) *v++ = (struct vbufv){ 1, SOUTH, m, y, n, use, usw, dse, dsw, USE, USW, DSE, DSW, 1 };
if (z == TILESD-1 || T_(x , y , z+1) >= OPEN) *v++ = (struct vbufv){ 1, NORTH, m, y, n, unw, une, dnw, dne, UNW, UNE, DNW, DNE, 1 };
if (x == 0 || T_(x-1, y , z ) >= OPEN) *v++ = (struct vbufv){ 1, WEST, m, y, n, usw, unw, dsw, dnw, USW, UNW, DSW, DNW, 1 };
if (x == TILESW-1 || T_(x+1, y , z ) >= OPEN) *v++ = (struct vbufv){ 1, EAST, m, y, n, une, use, dne, dse, UNE, USE, DNE, DSE, 1 };
if (y < TILESH-1 && T_(x , y+1, z ) >= OPEN) *v++ = (struct vbufv){ 2, DOWN, m, y, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 1 };
}
else if (t == DIRT || t == GRG1 || t == GRG2)
{
int u = (t == DIRT) ? 2 :
(t == GRG1) ? 3 : 4;
if (y == 0 || T_(x , y-1, z ) >= OPEN) *v++ = (struct vbufv){ u, UP, m, y, n, usw, use, unw, une, USW, USE, UNW, UNE, 1 };
if (z == 0 || T_(x , y , z-1) >= OPEN) *v++ = (struct vbufv){ 2, SOUTH, m, y, n, use, usw, dse, dsw, USE, USW, DSE, DSW, 1 };
if (z == TILESD-1 || T_(x , y , z+1) >= OPEN) *v++ = (struct vbufv){ 2, NORTH, m, y, n, unw, une, dnw, dne, UNW, UNE, DNW, DNE, 1 };
if (x == 0 || T_(x-1, y , z ) >= OPEN) *v++ = (struct vbufv){ 2, WEST, m, y, n, usw, unw, dsw, dnw, USW, UNW, DSW, DNW, 1 };
if (x == TILESW-1 || T_(x+1, y , z ) >= OPEN) *v++ = (struct vbufv){ 2, EAST, m, y, n, une, use, dne, dse, UNE, USE, DNE, DSE, 1 };
if (y < TILESH-1 && T_(x , y+1, z ) >= OPEN) *v++ = (struct vbufv){ 2, DOWN, m, y, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 1 };
}
else if (t == STON || t == SAND || t == ORE || t == OREH || t == HARD || t == WOOD || t == GRAN ||
t == RLEF || t == YLEF)
{
int f = (t == STON) ? 5 :
(t == SAND) ? 6 :
(t == ORE ) ? 11 :
(t == OREH) ? 12 :
(t == HARD) ? 13 :
(t == WOOD) ? 14 :
(t == GRAN) ? 15 :
(t == RLEF) ? 16 :
(t == YLEF) ? 17 :
0 ;
if (y == 0 || T_(x , y-1, z ) >= OPEN) *v++ = (struct vbufv){ f, UP, m, y, n, usw, use, unw, une, USW, USE, UNW, UNE, 1 };
if (z == 0 || T_(x , y , z-1) >= OPEN) *v++ = (struct vbufv){ f, SOUTH, m, y, n, use, usw, dse, dsw, USE, USW, DSE, DSW, 1 };
if (z == TILESD-1 || T_(x , y , z+1) >= OPEN) *v++ = (struct vbufv){ f, NORTH, m, y, n, unw, une, dnw, dne, UNW, UNE, DNW, DNE, 1 };
if (x == 0 || T_(x-1, y , z ) >= OPEN) *v++ = (struct vbufv){ f, WEST, m, y, n, usw, unw, dsw, dnw, USW, UNW, DSW, DNW, 1 };
if (x == TILESW-1 || T_(x+1, y , z ) >= OPEN) *v++ = (struct vbufv){ f, EAST, m, y, n, une, use, dne, dse, UNE, USE, DNE, DSE, 1 };
if (y < TILESH-1 && T_(x , y+1, z ) >= OPEN) *v++ = (struct vbufv){ f, DOWN, m, y, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 1 };
}
else if (t == WATR)
{
if (y == 0 || T_(x , y-1, z ) == OPEN)
{
int f = 7 + (pframe / 10 + (x ^ z)) % 4;
*w++ = (struct vbufv){ f, UP, m, y+0.06f, n, usw, use, unw, une, USW, USE, UNW, UNE, 0.5f };
*w++ = (struct vbufv){ f, DOWN, m, y-0.94f, n, dse, dsw, dne, dnw, DSE, DSW, DNE, DNW, 0.5f };
}
}
else if (t == LITE)
{
*w++ = (struct vbufv){ 18, SOUTH, m , y, n+0.5f, use, usw, dse, dsw, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
*w++ = (struct vbufv){ 18, NORTH, m , y, n-0.5f, unw, une, dnw, dne, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
*w++ = (struct vbufv){ 18, WEST, m+0.5f, y, n , usw, unw, dsw, dnw, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
*w++ = (struct vbufv){ 18, EAST, m-0.5f, y, n , une, use, dne, dse, 1.3f, 1.3f, 1.3f, 1.3f, 1 };
}
if (show_light_values && in_test_area(x, y, z))
{
int f = GLO_(x, y, z) + PNG0;
int ty = y;
float lit = 1.f;
if (IS_OPAQUE(x, y, z))
{
ty = y - 1;
lit = 0.1f;
}
*w++ = (struct vbufv){ f, UP, m, ty+0.9f, n, lit, lit, lit, lit, lit, lit, lit, lit, 1.f };
*w++ = (struct vbufv){ f, DOWN, m, ty-0.1f, n, lit, lit, lit, lit, lit, lit, lit, lit, 1.f };
}
}
if (w - wbuf < v_limit - v) // room for water in vertex buffer?
{
memcpy(v, wbuf, (w - wbuf) * sizeof *wbuf);
v += w - wbuf;
}
VBOLEN_(myx, myz) = v - vbuf;
polys += VBOLEN_(myx, myz);
TIMER(glBufferData)
glBufferData(GL_ARRAY_BUFFER, VBOLEN_(myx, myz) * sizeof *vbuf, vbuf, GL_STATIC_DRAW);
if (my < 4) // draw the newly buffered verts
{
TIMER(glDrawArrays)
modelM[12] = myx * BS * CHUNKW;
modelM[13] = 0.f;
modelM[14] = myz * BS * CHUNKD;
glUniformMatrix4fv(glGetUniformLocation(prog_id, "model"), 1, GL_FALSE, modelM);
glDrawArrays(GL_POINTS, 0, VBOLEN_(myx, myz));
}
}
debrief();
TIMER(swapwindow);
SDL_GL_SwapWindow(win);
TIMER();
}
|
pr91401-2.c | #pragma omp declare target
void f0 (void);
/* Each distribute directive below carries two dist_schedule clauses;
   at most one is allowed, so the compiler must diagnose each pragma
   (checked by the dg-warning directives on the pragma lines).  */
void
f1 (void)
{
int i;
#pragma omp distribute dist_schedule(static) dist_schedule(static) /* { dg-warning "too many 'dist_schedule' clauses" } */
for (i = 0; i < 8; ++i)
f0 ();
#pragma omp distribute dist_schedule(static,2) dist_schedule(static,4) /* { dg-warning "too many 'dist_schedule' clauses" } */
for (i = 0; i < 8; ++i)
f0 ();
}
#pragma omp end declare target
|
FullyDistVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.5 -------------------------------------------------*/
/* date: 10/09/2015 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc, Adam Lugowski ------------*/
/****************************************************************/
/*
Copyright (c) 2010-2015, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_VEC_H_
#define _FULLY_DIST_VEC_H_
#include<iostream>
#include <fstream>
#include<vector>
#include <utility>
#include <iterator>
#include "CombBLAS.h"
#include "CommGrid.h"
#include "FullyDist.h"
#include "Exception.h"
namespace combblas {
template <class IT, class NT>
class FullyDistSpVec;
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class DenseVectorLocalIterator;
template <typename RETT, typename NU1, typename NU2, typename BINOP>
class EWiseExtToPlainAdapter;
// ABAB: As opposed to SpParMat, IT here is used to encode global size and global indices;
// therefore it can not be 32-bits, in general.
// A dense vector distributed across all processors of the communication grid.
// IT encodes global sizes/indices (hence must be wide enough for the global
// length); NT is the element type.  The local portion lives in `arr`.
// NOTE(review): an earlier mechanical "copy -> std::copy" replacement had
// mangled several comments below; they are restored to plain English here.
template <class IT, class NT>
class FullyDistVec: public FullyDist<IT,NT, typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type >
{
public:
    FullyDistVec ( );
    FullyDistVec ( IT globallen, NT initval);
    FullyDistVec ( std::shared_ptr<CommGrid> grid);
    FullyDistVec ( std::shared_ptr<CommGrid> grid, IT globallen, NT initval);
    FullyDistVec ( const FullyDistSpVec<IT, NT> & rhs ); // Sparse -> Dense conversion constructor
    FullyDistVec ( const DenseParVec<IT,NT> & rhs); //!< DenseParVec->FullyDistVec conversion operator
    FullyDistVec ( const std::vector<NT> & fillarr, std::shared_ptr<CommGrid> grid ); // initialize a FullyDistVec with a vector from each processor

    template <class ITRHS, class NTRHS>
    FullyDistVec ( const FullyDistVec<ITRHS, NTRHS>& rhs ); // type converter constructor

    //! Default element handler used by the text-based Read/Save routines:
    //! reads and writes one NT per token with operator>>/operator<<.
    class ScalarReadSaveHandler
    {
    public:
        NT getNoNum(IT index) { return static_cast<NT>(1); } // value used when the input carries indices only
        template <typename c, typename t>
        NT read(std::basic_istream<c,t>& is, IT index)
        {
            NT v;
            is >> v;
            return v;
        }
        template <typename c, typename t>
        void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
        {
            os << v;
        }
    };

    template <class HANDLER>
    std::ifstream& ReadDistribute (std::ifstream& infile, int master, HANDLER handler);
    std::ifstream& ReadDistribute (std::ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }
    template <class HANDLER>
    void SaveGathered(std::ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
    void SaveGathered(std::ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler(), false); }

    template <class ITRHS, class NTRHS>
    FullyDistVec<IT,NT> & operator=(const FullyDistVec< ITRHS,NTRHS > & rhs); // assignment with type conversion
    FullyDistVec<IT,NT> & operator=(const FullyDistVec<IT,NT> & rhs); //!< Actual assignment operator
    FullyDistVec<IT,NT> & operator=(const FullyDistSpVec<IT,NT> & rhs); //!< FullyDistSpVec->FullyDistVec conversion operator
    FullyDistVec<IT,NT> & operator=(const DenseParVec<IT,NT> & rhs); //!< DenseParVec->FullyDistVec conversion operator

    //! Assign the same value to every local entry (purely local, no communication).
    FullyDistVec<IT,NT> & operator=(NT fixedval) // assign fixed value
    {
#ifdef _OPENMP
    #pragma omp parallel for
#endif
        // signed IT index for OpenMP; cast avoids a signed/unsigned comparison warning
        for(IT i=0; i < static_cast<IT>(arr.size()); ++i)
            arr[i] = fixedval;
        return *this;
    }

    FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //<! subsref
    //! like operator=, but instead of making a deep copy it just steals the contents.
    //! Useful for places where the "victim" will be destroyed immediately after the call.
    FullyDistVec<IT,NT> & stealFrom(FullyDistVec<IT,NT> & victim);
    FullyDistVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
    FullyDistVec<IT,NT> & operator+=(const FullyDistVec<IT,NT> & rhs);
    FullyDistVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
    FullyDistVec<IT,NT> & operator-=(const FullyDistVec<IT,NT> & rhs);
    bool operator==(const FullyDistVec<IT,NT> & rhs) const;

    void SetElement (IT indx, NT numx); // element-wise assignment
    void SetLocalElement(IT index, NT value) { arr[index] = value; } // no checks, local index
    NT GetElement (IT indx) const; // element-wise fetch
    NT operator[](IT indx) const // more c++ like API
    {
        return GetElement(indx);
    }

    void Set(const FullyDistSpVec< IT,NT > & rhs);
    template <class NT1, typename _BinaryOperationIdx, typename _BinaryOperationVal>
    void GSet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal, MPI_Win win);
    template <class NT1, typename _BinaryOperationIdx>
    FullyDistSpVec<IT,NT> GGet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, NT nullValue);

    void iota(IT globalsize, NT first);
    void RandPerm(); // randomly permute the vector
    FullyDistVec<IT,IT> sort(); // sort and return the permutation

    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::LengthUntil;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::TotalLength;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::Owner;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyLocLength;
    IT LocArrSize() const { return arr.size(); } // = MyLocLength() once arr is resized

    template <typename _Predicate>
    FullyDistSpVec<IT,NT> Find(_Predicate pred) const; //!< Return the elements for which pred is true
    FullyDistSpVec<IT,NT> Find(NT val) const; //!< Return the elements val is found
    template <typename _Predicate>
    FullyDistVec<IT,IT> FindInds(_Predicate pred) const; //!< Return the indices where pred is true
    template <typename _Predicate>
    IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true

    //! Apply __unary_op to every local entry in place (no communication).
    template <typename _UnaryOperation>
    void Apply(_UnaryOperation __unary_op)
    {
        std::transform(arr.begin(), arr.end(), arr.begin(), __unary_op);
    }

    //! Apply __binary_op(value, global_index) to every local entry in place.
    template <typename _BinaryOperation>
    void ApplyInd(_BinaryOperation __binary_op)
    {
        IT offset = LengthUntil(); // global index of this processor's first local entry
#ifdef _OPENMP
    #pragma omp parallel for
#endif
        for(IT i=0; i < static_cast<IT>(arr.size()); ++i)
            arr[i] = __binary_op(arr[i], i + offset);
    }

    template <typename _UnaryOperation, typename IRRELEVANT_NT>
    void Apply(_UnaryOperation __unary_op, const FullyDistSpVec<IT,IRRELEVANT_NT>& mask);

    // extended callback versions
    template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
    void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, const bool useExtendedBinOp);
    template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
    void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue, const bool useExtendedBinOp);

    // plain fallback versions: wrap the plain functors in adapters and forward
    template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
    void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op)
    {
        EWiseApply(other,
                   EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
                   EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
                   true);
    }
    template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
    void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue)
    {
        EWiseApply(other,
                   EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
                   EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
                   applyNulls, nullValue, true);
    }

    //! Predicate that accepts every pair; default _do_op for the convenience overloads below.
    template <typename T1, typename T2>
    class retTrue {
    public:
        bool operator()(const T1& x, const T2& y)
        {
            return true;
        }
    };

    template <typename _BinaryOperation, class NT2>
    void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op)
    {
        this->EWiseApply(other, __binary_op, retTrue<NT, NT2>());
    }
    template <typename _BinaryOperation, class NT2>
    void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, bool applyNulls, NT2 nullValue)
    {
        this->EWiseApply(other, __binary_op, retTrue<NT, NT2>(), applyNulls, nullValue);
    }

    //! Dump the local portion, space separated, into a per-processor debug file.
    void PrintToFile(std::string prefix)
    {
        std::ofstream output;
        commGrid->OpenDebugFile(prefix, output);
        std::copy(arr.begin(), arr.end(), std::ostream_iterator<NT> (output, " "));
        output << std::endl;
        output.close();
    }
    void PrintInfo(std::string vectorname) const;
    void DebugPrint();
    std::shared_ptr<CommGrid> getcommgrid() const { return commGrid; }

    std::pair<IT, NT> MinElement() const; // returns <index, value> pair of global minimum
    template <typename _BinaryOperation>
    NT Reduce(_BinaryOperation __binary_op, NT identity) const; //! Reduce can be used to implement max_element, for instance
    template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
    OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op) const;
    void SelectCandidates(double nver);

    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::glen;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::commGrid;

    std::vector<NT> data() { return arr; } // copy of the local portion
    const std::vector<NT> & data() const { return arr; } // read-only view of the local portion (no copy)

    // TODO: cache
    //! Global length computed by summing local sizes over the grid (collective call).
    IT getTotalLength() {
        IT v = arr.size();
        IT r;
        MPI_Allreduce(&v, &r, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
        return r;
    }
    template <typename _BinaryOperation>
    void EWise(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op);

private:
    std::vector< NT > arr; // the local portion of the distributed vector

    template <class IU, class NU>
    friend class DenseParMat;
    template <class IU, class NU, class UDER>
    friend class SpParMat;
    template <class IU, class NU>
    friend class FullyDistVec;
    template <class IU, class NU>
    friend class FullyDistSpVec;
    template <class IU, class NU>
    friend class DenseVectorLocalIterator;

    template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
    friend FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>
    SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x );
    template <typename IU, typename NU1, typename NU2>
    friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
    EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
    template <typename IU, typename NU1, typename NU2, typename _BinaryOperation>
    friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
    EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, typename promote_trait<NU1,NU2>::T_promote zero);
    template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
    friend FullyDistSpVec<IU,RET>
    EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
    template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
    friend FullyDistSpVec<IU,RET>
    EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);

    template <typename IU>
    friend void RenameVertices(DistEdgeList<IU> & DEL);
    template <typename IU, typename NU>
    friend FullyDistVec<IU,NU> Concatenate ( std::vector< FullyDistVec<IU,NU> > & vecs);
    template <typename IU, typename NU>
    friend void Augment (FullyDistVec<int64_t, int64_t>& mateRow2Col, FullyDistVec<int64_t, int64_t>& mateCol2Row,
                         FullyDistVec<int64_t, int64_t>& parentsRow, FullyDistVec<int64_t, int64_t>& leaves);
    template <class IU, class DER>
    friend SpParMat<IU, bool, DER> PermMat (const FullyDistVec<IU,IU> & ri, const IU ncol);
    friend void maximumMatching(SpParMat < int64_t, bool, SpDCCols<int32_t,bool> > & A, FullyDistVec<int64_t, int64_t>& mateRow2Col,FullyDistVec<int64_t, int64_t>& mateCol2Row);
};
}
#include "FullyDistVec.cpp"
#endif
|
miniGMG.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#include <omp.h>
#ifdef __MPI
#include <mpi.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "defines.h"
#include "box.h"
#include "mg.h"
#include "operators.h"
//------------------------------------------------------------------------------------------------------------------------------
// Driver: parse the decomposition from the command line, build the multigrid
// hierarchy for the "hard" Helmholtz problem, time several MGSolve() passes,
// and report the max/L2 error of the computed solution against the exact one.
// usage: ./a.out [log2_subdomain_dim] [subdomains per rank in i,j,k] [ranks in i,j,k]
int main(int argc, char **argv){
  int MPI_Rank=0;
  int MPI_Tasks=1;
  int OMP_Threads = 1;

  // discover the OpenMP thread count (recorded by the master thread only)
  #pragma omp parallel
  {
    #pragma omp master
    {
      OMP_Threads = omp_get_num_threads();
    }
  }

  #ifdef __MPI
  #warning Compiling for MPI...
  int MPI_threadingModel          = -1;
  //int MPI_threadingModelRequested = MPI_THREAD_SINGLE;
  //int MPI_threadingModelRequested = MPI_THREAD_SERIALIZED;
  int MPI_threadingModelRequested = MPI_THREAD_FUNNELED;
  //int MPI_threadingModelRequested = MPI_THREAD_MULTIPLE;
  #ifdef __MPI_THREAD_MULTIPLE
  MPI_threadingModelRequested = MPI_THREAD_MULTIPLE;
  #endif
  MPI_Init_thread(&argc, &argv, MPI_threadingModelRequested, &MPI_threadingModel);
  MPI_Comm_size(MPI_COMM_WORLD, &MPI_Tasks);
  MPI_Comm_rank(MPI_COMM_WORLD, &MPI_Rank);
  // never use (or report) a higher threading level than was requested
  if(MPI_threadingModel>MPI_threadingModelRequested)MPI_threadingModel=MPI_threadingModelRequested;
  if(MPI_Rank==0){
         if(MPI_threadingModelRequested == MPI_THREAD_MULTIPLE  )printf("Requested MPI_THREAD_MULTIPLE, ");
    else if(MPI_threadingModelRequested == MPI_THREAD_SINGLE    )printf("Requested MPI_THREAD_SINGLE, ");
    else if(MPI_threadingModelRequested == MPI_THREAD_FUNNELED  )printf("Requested MPI_THREAD_FUNNELED, ");
    else if(MPI_threadingModelRequested == MPI_THREAD_SERIALIZED)printf("Requested MPI_THREAD_SERIALIZED, ");
    else                                                         printf("Requested Unknown MPI Threading Model (%d), ",MPI_threadingModelRequested);
         if(MPI_threadingModel == MPI_THREAD_MULTIPLE  )printf("got MPI_THREAD_MULTIPLE\n");
    else if(MPI_threadingModel == MPI_THREAD_SINGLE    )printf("got MPI_THREAD_SINGLE\n");
    else if(MPI_threadingModel == MPI_THREAD_FUNNELED  )printf("got MPI_THREAD_FUNNELED\n");
    else if(MPI_threadingModel == MPI_THREAD_SERIALIZED)printf("got MPI_THREAD_SERIALIZED\n");
    else                                                printf("got Unknown MPI Threading Model (%d)\n",MPI_threadingModel);
    fflush(stdout); }
  #ifdef __MPI_THREAD_MULTIPLE
  // MPI_THREAD_MULTIPLE was explicitly required but not granted; bail out
  if( (MPI_threadingModelRequested == MPI_THREAD_MULTIPLE) && (MPI_threadingModel != MPI_THREAD_MULTIPLE) ){MPI_Finalize();exit(0);}
  #endif
  #endif

  // defaults: 64^3 subdomains tiling a 256^3 per-rank problem on a single rank
  int log2_subdomain_dim = 6;
  int subdomains_per_rank_in_i=256 / (1<<log2_subdomain_dim);
  int subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim);
  int subdomains_per_rank_in_k=256 / (1<<log2_subdomain_dim);
  int ranks_in_i=1;
  int ranks_in_j=1;
  int ranks_in_k=1;
  if(argc==2){
    // subdomain size only; keep the 256^3 per-rank problem
    log2_subdomain_dim=atoi(argv[1]);
    subdomains_per_rank_in_i=256 / (1<<log2_subdomain_dim);
    subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim);
    subdomains_per_rank_in_k=256 / (1<<log2_subdomain_dim);
  }else if(argc==5){
    // subdomain size + subdomains per rank
    log2_subdomain_dim=atoi(argv[1]);
    subdomains_per_rank_in_i=atoi(argv[2]);
    subdomains_per_rank_in_j=atoi(argv[3]);
    subdomains_per_rank_in_k=atoi(argv[4]);
  }else if(argc==8){
    // subdomain size + subdomains per rank + rank grid
    log2_subdomain_dim=atoi(argv[1]);
    subdomains_per_rank_in_i=atoi(argv[2]);
    subdomains_per_rank_in_j=atoi(argv[3]);
    subdomains_per_rank_in_k=atoi(argv[4]);
    ranks_in_i=atoi(argv[5]);
    ranks_in_j=atoi(argv[6]);
    ranks_in_k=atoi(argv[7]);
  }else if(argc!=1){
    if(MPI_Rank==0){printf("usage: ./a.out [log2_subdomain_dim] [subdomains per rank in i,j,k] [ranks in i,j,k]\n");}
    #ifdef __MPI
    MPI_Finalize();
    #endif
    exit(0);
  }
  /*
  if(log2_subdomain_dim>7){
    if(MPI_Rank==0){printf("error, log2_subdomain_dim(%d)>7\n",log2_subdomain_dim);}
    #ifdef __MPI
    MPI_Finalize();
    #endif
    exit(0);
  }
  */
  // the rank grid must account for every MPI task
  if(ranks_in_i*ranks_in_j*ranks_in_k != MPI_Tasks){
    if(MPI_Rank==0){printf("error, ranks_in_i*ranks_in_j*ranks_in_k(%d*%d*%d=%d) != MPI_Tasks(%d)\n",ranks_in_i,ranks_in_j,ranks_in_k,ranks_in_i*ranks_in_j*ranks_in_k,MPI_Tasks);}
    #ifdef __MPI
    MPI_Finalize();
    #endif
    exit(0);
  }
  if(MPI_Rank==0)printf("%d MPI Tasks of %d threads\n",MPI_Tasks,OMP_Threads);

  int subdomain_dim_i=1<<log2_subdomain_dim;
  int subdomain_dim_j=1<<log2_subdomain_dim;
  int subdomain_dim_k=1<<log2_subdomain_dim;
  // fine dim   = 128  64  32  16   8   4
  // levels     =   6   5   4   3   2   1
  //int log2_coarse_dim = 2; // i.e. coarsen to 4^3
  int log2_coarse_dim = 1;   // i.e. coarsen to 2^3
  int levels_in_vcycle=1+log2_subdomain_dim-log2_coarse_dim; // ie 1+log2(fine grid size)-log2(bottom grid size)
  if(MPI_Rank==0){printf("truncating the v-cycle at %d^3 subdomains\n",1<<log2_coarse_dim);fflush(stdout);}
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  domain_type domain;
  int boundary_conditions[3] = {__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC}; // i-, j-, and k-directions
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  create_domain(&domain,
                subdomain_dim_i,subdomain_dim_j,subdomain_dim_k,
                subdomains_per_rank_in_i,subdomains_per_rank_in_j,subdomains_per_rank_in_k,
                ranks_in_i,ranks_in_j,ranks_in_k,
                MPI_Rank,
                boundary_conditions,
                __NumGrids,1,levels_in_vcycle);
  double h0=1.0/((double)(domain.dim.i)); // fine-grid spacing
  if(MPI_Rank==0){printf("initializing alpha, beta, RHS for the ``hard problem''...");fflush(stdout);}
  double a=0.9; // i.e. good Helmholtz
  double b=0.9;
  initialize_problem(&domain,0,h0,a,b);
  if(MPI_Rank==0){printf("done\n");fflush(stdout);}
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  MGBuild(&domain,a,b,h0); // restrictions, dominant eigenvalue, etc...
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  int s,sMax=2;
  #ifdef __MPI
  sMax=4;
  #endif
  // Make an initial guess for u(=0)... Solve Lu=f to a tolerance of 1e-15... print the benchmarking timing results...
  MGResetTimers(&domain);for(s=0;s<sMax;s++){zero_grid(&domain,0,__u); MGSolve(&domain,__u,__f,a,b,1e-15);}print_timing(&domain);
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // calculate error...
  double h3 = h0*h0*h0;
  add_grids(&domain,0,__temp,1.0,__u_exact,-1.0,__u);      // __temp = __u_exact - __u
  double   max = norm(&domain,0,__temp);                   // max norm of error function
  double error = sqrt( dot(&domain,0,__temp,__temp)*h3);   // normalized L2 error ?
  if(MPI_Rank==0){printf("Error test: h = %e, max = %e\n",h0,max);}
  if(MPI_Rank==0){printf("Error test: h = %e, L2 = %e\n",h0,error);}
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  destroy_domain(&domain);
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  #ifdef __MPI
  MPI_Finalize();
  #endif
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  return(0);
}
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
// Each *Bitfields class below mirrors the bit-field storage of one statement
// class. The leading unnamed bit-field ("unsigned : NumStmtBits;") reserves
// the bits already used by the base StmtBitfields so the members declared
// after it do not overlap them.

/// Bit-fields and the "if" keyword location for IfStmt.
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};

/// Bit-fields and the "switch" keyword location for SwitchStmt.
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};

/// Bit-fields and the "while" keyword location for WhileStmt.
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};

/// Keyword location for DoStmt.
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};

/// Keyword location for ForStmt.
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};

/// Keyword location shared by GotoStmt and IndirectGotoStmt.
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};

/// Keyword location for ContinueStmt.
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};

/// Keyword location for BreakStmt.
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};

/// Bit-fields and the "return" keyword location for ReturnStmt.
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};

/// Bit-fields and the keyword location for SwitchCase (and CaseStmt).
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//

/// Bit-fields shared by every Expr: the value kind, the object kind, and
/// the dependence bits of the expression.
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
/// The value kind of this expression (an ExprValueKind value).
unsigned ValueKind : 2;
/// The object kind of this expression (an ExprObjectKind value).
unsigned ObjectKind : 3;
/// The dependence bits of this expression (an ExprDependence value).
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
/// Total bits used by ExprBitfields above: the Stmt bits, ValueKind (2) +
/// ObjectKind (3), and the dependence bits.
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
/// Bit-fields for ConstantExpr; describes the kind and layout of the
/// tail-allocated cached result value.
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, whether the tail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64, the bit width of the tail-allocated
/// integer. 7 bits because it is the minimal number of bits to represent a
/// value from 0 to 64 (the size of the tail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, whether the ASTContext will cleanup the
/// destructor on the tail-allocated APValue.
unsigned HasCleanup : 1;
/// Whether this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
/// Bit-fields and source location for PredefinedExpr (__func__ etc.).
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};

/// Bit-fields and the name location for DeclRefExpr. The Has* flags record
/// which optional trailing objects are present.
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
/// True if this declaration reference has storage for a qualifier
/// (nested-name-specifier).
unsigned HasQualifier : 1;
/// True if explicit template keyword and/or argument info is stored.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if a separate "found" declaration is stored.
unsigned HasFoundDecl : 1;
/// True if the referenced declaration was resolved from an
/// overloaded set of more than one candidate.
unsigned HadMultipleCandidates : 1;
/// True if this reference names a variable captured from an
/// enclosing scope (e.g. in a lambda or block).
unsigned RefersToEnclosingVariableOrCapture : 1;
/// A NonOdrUseReason value: why this reference does not constitute
/// an odr-use of the declaration.
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};

/// Bit-fields for FloatingLiteral.
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};

/// Bit-fields for StringLiteral.
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated tokens this string is made of.
/// This is the number of trailing SourceLocations.
unsigned NumConcatenated;
};

/// Bit-fields for CharacterLiteral.
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
/// The kind of this character literal; a CharacterLiteral kind
/// enumerator (narrow/wide/UTF prefix).
unsigned Kind : 3;
};

/// Bit-fields and operator location for UnaryOperator.
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
/// The opcode of this operator (a UnaryOperatorKind value).
unsigned Opc : 5;
/// Whether this operation can overflow.
unsigned CanOverflow : 1;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
/// The location of the operator.
SourceLocation Loc;
};

/// Bit-fields for UnaryExprOrTypeTraitExpr (sizeof/alignof and friends).
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
/// The trait kind (a UnaryExprOrTypeTrait value).
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};

/// Bracket location for ArraySubscriptExpr.
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
/// The location of the ']'.
SourceLocation RBracketLoc;
};
/// Bit-fields shared by CallExpr and its subclasses.
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
/// The number of "pre-arguments" stored before the actual call
/// arguments (0 or 1, given the single bit of storage).
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
/// CallExprBitfields fills a full 32-bit word (see the padding above), so
/// subclasses start their bits at bit 32.
enum { NumCallExprBits = 32 };
/// Bit-fields and the operator location for MemberExpr.
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};

/// Bit-fields shared by CastExpr and its subclasses.
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
/// The kind of cast (a CastKind value).
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};

/// Bit-fields and the operator location for BinaryOperator.
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
/// The opcode of this operator (a BinaryOperatorKind value).
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
/// The location of the operator.
SourceLocation OpLoc;
};

/// Bit-fields for InitListExpr.
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};

/// Element count for ParenListExpr.
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};

/// Keyword location for GenericSelectionExpr (_Generic).
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};

/// Bit-fields for PseudoObjectExpr.
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};

/// Bit-fields for SourceLocExpr (__builtin_LINE and friends).
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
unsigned Kind : 2;
};

/// Bit-fields for StmtExpr (GNU statement expressions).
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//

/// Bit-fields for CXXOperatorCallExpr; sits on top of the CallExpr bits.
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 14;
};

/// Bit-fields for CXXRewrittenBinaryOperator (rewritten comparisons).
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
/// True if the rewritten operator call has its arguments in reversed
/// order relative to the original source.
unsigned IsReversed : 1;
};

/// Value and location for CXXBoolLiteralExpr.
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};

/// Location for CXXNullPtrLiteralExpr.
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};

/// Bit-fields and location for CXXThisExpr.
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};

/// Bit-fields and keyword location for CXXThrowExpr.
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};

/// Use location for CXXDefaultArgExpr.
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};

/// Use location for CXXDefaultInitExpr.
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};

/// Paren location for CXXScalarValueInitExpr (e.g. "int()").
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
/// The location of the ')'.
SourceLocation RParenLoc;
};

/// Bit-fields for CXXNewExpr.
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};

/// Bit-fields and location for CXXDeleteExpr.
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};

/// Bit-fields for TypeTraitExpr.
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
/// Bit-fields for DependentScopeDeclRefExpr.
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};

/// Bit-fields and location for CXXConstructExpr.
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
/// Whether this construction may be elided.
unsigned Elidable : 1;
/// Whether the constructor was resolved from an overloaded set of
/// more than one candidate.
unsigned HadMultipleCandidates : 1;
/// Whether this was list-initialization.
unsigned ListInitialization : 1;
/// Whether this was std::initializer_list initialization.
unsigned StdInitListInitialization : 1;
/// Whether this construction requires zero-initialization first.
unsigned ZeroInitialization : 1;
/// The kind of construction (complete/base/delegating etc.).
unsigned ConstructionKind : 3;
/// The location of this construct-expression.
SourceLocation Loc;
};

/// Bit-fields for ExprWithCleanups.
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};

/// Argument count for CXXUnresolvedConstructExpr.
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};

/// Bit-fields and operator location for CXXDependentScopeMemberExpr.
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};

/// Bit-fields shared by OverloadExpr and its subclasses.
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
/// Bits used by OverloadExprBitfields before the derived-class padding.
enum { NumOverloadExprBits = NumExprBits + 1 };
/// Bit-fields for UnresolvedLookupExpr; layered over the OverloadExpr bits.
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
// Keep the derived bits within the first word: growing past 4 bytes would
// overlap OverloadExprBitfields::NumResults.
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= 4 bytes to "
"avoid trashing OverloadExprBitfields::NumResults!");
/// Bit-fields for UnresolvedMemberExpr; layered over the OverloadExpr bits.
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
// Keep the derived bits within the first word: growing past 4 bytes would
// overlap OverloadExprBitfields::NumResults.
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= 4 bytes to "
"avoid trashing OverloadExprBitfields::NumResults!");
/// Bit-fields for CXXNoexceptExpr (the noexcept operator).
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
/// The (non-dependent) result of the noexcept operator.
unsigned Value : 1;
};

/// Name location for SubstNonTypeTemplateParmExpr.
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};

/// Bit-fields and keyword location for RequiresExpr.
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
/// Whether the requirements are satisfied.
unsigned IsSatisfied : 1;
/// The location of the "requires" keyword.
SourceLocation RequiresKWLoc;
};

//===--- C++ Coroutines TS bitfields classes ---===//

/// Bit-fields for CoawaitExpr.
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
/// Whether this co_await was implicitly generated.
unsigned IsImplicit : 1;
};

//===--- Obj-C Expression bitfields classes ---===//

/// Bit-fields for ObjCIndirectCopyRestoreExpr.
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
/// Whether the argument should be copied in before the call.
unsigned ShouldCopy : 1;
};

//===--- Clang Extensions bitfields classes ---===//

/// Bit-fields and location for OpaqueValueExpr.
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
/// The shared bit-field storage: every member below overlays the same
/// memory, and the active view is selected by the node's dynamic class
/// (recorded in StmtBits.sClass).
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
/// Pointer-taking convenience overload; forwards to the reference form.
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
/// Placement new: returns the provided memory unchanged.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// Matching operator delete overloads are deliberate no-ops: the memory is
// owned by the ASTContext allocator (see operator new above), and these
// exist only so exceptions thrown during construction are well-formed.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
/// Dereferencing casts the underlying Stmt* to T*, mapping null to null.
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
/// Iterators over arrays of Expr* children.
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Stmts are never default-constructed, copied, or moved; they are
// allocated via the ASTContext operator new above.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
/// Construct a statement of the given class, recording the class in the
/// shared bit-fields and updating the class statistics when enabled.
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
/// Return the class of this statement, as stored in the shared bit-fields.
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
/// Return the name of this statement's class, for debugging/printing.
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
/// Const overload; forwards to the non-const IgnoreContainers.
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
/// Non-const overload; forwards to the const stripLabelLikeStatements.
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
/// Const overload built on top of the non-const children().
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
// The declaration(s) this statement carries.
DeclGroupRef DG;
// Source range of the statement (StartLoc through the trailing ';').
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
/// Access the underlying declaration group.
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
/// LLVM-style RTTI support.
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
// Iterators over the declarations themselves.
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
/// Build a null statement at location \p L. \p hasLeadingEmptyMacro is
/// true when the ';' was preceded by an empty macro expansion.
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
// A null statement begins and ends at its ';'.
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
/// LLVM-style RTTI support.
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
// A null statement has no children; both ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  /// Build a compound statement; the child statements are copied into the
  /// trailing object storage allocated by Create().
  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);

  /// Build an empty compound statement (used for deserialization).
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  /// Copy \p Stmts into the trailing object storage. The count must match
  /// CompoundStmtBits.NumStmts.
  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  /// Allocate (with trailing storage for the statements) and build a
  /// compound statement.
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // Iterators over the statements in the body, which live in the trailing
  // object storage.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets of the LHS, optional RHS and substatement inside the "Stmt *"
  // trailing array. The RHS slot exists only for GNU range cases, which
  // shifts the substatement offset by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  // The LHS/RHS slots store Expr* values as Stmt*, hence the casts.
  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  // Returns nullptr unless this is a GNU range case (no RHS slot otherwise).
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// Represents the "default:" label of a switch, together with the statement
/// that follows it.
class DefaultStmt : public SwitchCase {
  /// The statement labeled by "default:".
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  // The "default" keyword location is the keyword location stored in the
  // SwitchCase base.
  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators over the single child.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};
// Out-of-line so that CaseStmt/DefaultStmt are complete types; dispatch
// manually to the concrete subclass.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Out-of-line for the same reason as getEndLoc() above: the subclasses must
// be complete before we can dyn_cast to them.
Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  /// The declaration this label refers to.
  LabelDecl *TheDecl;
  /// The statement the label is attached to.
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  // The identifier location is kept in the bit-fields of the Stmt base.
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The statement the attributes apply to.
  Stmt *SubStmt;

  /// Build an attributed statement; the attribute pointers are copied into
  /// the trailing object storage allocated by Create().
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  /// Build an empty attributed statement with room for \p NumAttrs attribute
  /// pointers (used for deserialization); the slots are null-initialized.
  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // Accessors for the trailing array of attribute pointers.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the "Stmt *" trailing array; each optional slot that is
  // present shifts the slots after it by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition slot stores an Expr* as a Stmt*, hence the casts.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }

  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // The else statement is optional; these return nullptr when absent.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // The init statement is optional; these return nullptr when absent.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  // The "else" keyword location is stored as a trailing object and exists
  // only when an else statement is present.
  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the "Stmt *" trailing array; each optional slot that is
  // present shifts the slots after it by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition slot stores an Expr* as a Stmt*, hence the casts.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // The init statement is optional; these return nullptr when absent.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepend a case to the intrusive singly-linked case list; cases therefore
  // appear in the list in reverse order of addition.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Offsets into the "Stmt *" trailing array; the optional condition-variable
  // slot, when present, shifts the condition and body slots by one.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition slot stores an Expr* as a Stmt*, hence the casts.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  /// Indices into SubExprs for the loop body and the controlling condition.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];

  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setBody(Body);
    setCond(Cond);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The condition slot stores an Expr* as a Stmt*, hence the casts.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The "do" keyword location lives in the bit-fields of the Stmt base.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators over the body and condition.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Indices into SubExprs. CONDVAR holds the faux DeclStmt for a condition
  // variable ('for (...; int y = ...; ...)'), or null.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  // Defined out of line; needs the ASTContext to set up the condition
  // variable's DeclStmt.
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  // Cond/Inc are stored as Stmt* slots; cast back to Expr* on access.
  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // The 'for' keyword location is kept in the Stmt bit-fields.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // Spans from the 'for' keyword to the end of the body.
  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators over all five sub-statement slots (null entries included).
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  /// Construct a goto targeting \p label; GL is the location of the 'goto'
  /// keyword and LL the location of the label name.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    GotoStmtBits.GotoLoc = GL;
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' keyword location lives in the Stmt bit-fields.
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }

  void setLabelLoc(SourceLocation L) { LabelLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }

  // Spans from the 'goto' keyword to the label name.
  SourceLocation getBeginLoc() const { return GotoStmtBits.GotoLoc; }
  SourceLocation getEndLoc() const { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc; // Location of the '*' in 'goto *target'.
  Stmt *Target;           // Target address expression (an Expr, stored as Stmt).

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // The 'goto' keyword location is kept in the shared GotoStmt bit-fields.
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  // Spans from the 'goto' keyword to the end of the target expression.
  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the sole child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Construct a continue statement whose 'continue' keyword is at \p CL.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    ContinueStmtBits.ContinueLoc = CL;
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location is stored in the Stmt bit-fields.
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }

  // Both source bounds coincide with the 'continue' keyword itself.
  SourceLocation getBeginLoc() const { return ContinueStmtBits.ContinueLoc; }
  SourceLocation getEndLoc() const { return ContinueStmtBits.ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Construct a break statement whose 'break' keyword is at \p BL.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    BreakStmtBits.BreakLoc = BL;
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The keyword location is stored in the Stmt bit-fields.
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }

  // Both source bounds coincide with the 'break' keyword itself.
  SourceLocation getBeginLoc() const { return BreakStmtBits.BreakLoc; }
  SourceLocation getEndLoc() const { return BreakStmtBits.BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  // Trailing-object count: one VarDecl* slot when NRVO storage is present,
  // zero otherwise.
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return value may be null for a bare 'return;'.
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  // The 'return' keyword location is kept in the Stmt bit-fields.
  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  // Ends at the return expression if present, else at the keyword.
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: empty range when there is no return expression.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions: outputs first ([0, NumOutputs)), then inputs
  // ([NumOutputs, NumOutputs + NumInputs)); see the iterator accessors below.
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Invalid at this level; GCCAsmStmt and MSAsmStmt provide their own
  // getBeginLoc/getEndLoc with real locations.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand. All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint. Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators. Inputs are stored immediately after the outputs.
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators. Outputs occupy the front of the Exprs array.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  // Children are the output and input operand expressions.
  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Parallel arrays indexed like Exprs: outputs first, then inputs; Names
  // additionally holds label identifiers after the inputs.
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0; // Number of 'asm goto' label operands.

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below). An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo; // Only meaningful when MyKind == Operand.

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present. This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces. If the asm string is erroneous, emit errors and return a
  /// nonzero value, otherwise return zero. This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  // Output names/constraints occupy indices [0, NumOutputs).
  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input names/constraints follow the outputs in the parallel arrays.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  /// True for 'asm goto', i.e. when the statement has label operands.
  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  // Label identifiers follow the outputs and inputs in Names.
  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  // Label expressions are stored after the outputs and inputs in Exprs.
  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  // Deserialization helper used by ASTStmtReader.
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  // Spans from the 'asm' keyword to the closing ')'.
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  // Parallel arrays indexed like Exprs: outputs first, then inputs.
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  /// True when written in brace-delimited form (LBraceLoc was set).
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input constraints follow the output constraints in the array.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  // Shared setup used by the constructor / deserialization.
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  // Children are the output and input operand expressions.
  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents a '__except' block of a Windows SEH '__try' statement.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc; // Location of the '__except' keyword.
  Stmt *Children[2];  // Indexed by the enum below.

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  // The filter is stored as a Stmt*; cast back to Expr* on access.
  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  // Children: the filter expression followed by the handler block.
  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a '__finally' block of a Windows SEH '__try' statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc; // Location of the '__finally' keyword.
  Stmt *Block;        // The compound statement that follows the keyword.

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  // The sole child is the finally block.
  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a Windows SEH '__try' statement (also used for C++ 'try'
/// lowered onto this node; see IsCXXTry).
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;         // True for 'try', false for '__try'.
  SourceLocation TryLoc; // Location of the 'try' / '__try' keyword.
  Stmt *Children[2];     // Indexed by the enum below.

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  // The handler is either a SEHExceptStmt or a SEHFinallyStmt; use the
  // typed accessors below to distinguish.
  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  // Children: the try block followed by the handler.
  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc; // Location of the '__leave' keyword.

public:
  /// Construct a __leave statement whose keyword is at \p LL.
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
  SourceLocation getLeaveLoc() const { return LeaveLoc; }

  // A __leave consists solely of the keyword, so both bounds coincide.
  SourceLocation getBeginLoc() const LLVM_READONLY { return getLeaveLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return getLeaveLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: a __leave statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
// The initializers occupy the leading NumCaptures slots of the trailing
// Stmt* array; they are stored as Stmt* and reinterpreted as Expr* here.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
/// Start location, forwarded from the captured statement.
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
/// End location, forwarded from the captured statement.
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
/// Full source range, forwarded from the captured statement.
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
/// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
a.35.6.c | /* { dg-do compile } */
void work (int, int);
/* Negative test: a barrier region may not be closely nested inside a
   single region (OpenMP nesting rules); the dg-error directive below
   verifies the compiler rejects it. */
void
wrong6 (int n)
{
#pragma omp parallel
{
#pragma omp single
{
work (n, 0);
/* incorrect nesting of barrier region in a single region */
#pragma omp barrier /* { dg-error "may not be closely nested" } */
work (n, 1);
}
}
}
|
HelloOMP.c | #include <stdio.h>
#include <omp.h>
/* Spawn an OpenMP parallel region; every thread prints a greeting tagged
   with its thread number. Output order is unspecified. */
int main(void)
{
#pragma omp parallel
  {
    printf("(%d:!!!Hello world!!!)", omp_get_thread_num());
  }
  return 0;
}
|
paraboloid.h | #ifndef batoid_paraboloid_h
#define batoid_paraboloid_h
#include "surface.h"
namespace batoid {
#if defined(BATOID_GPU)
#pragma omp declare target
#endif
// Paraboloid of revolution, parameterized by its vertex radius of
// curvature R. NOTE(review): given the cached 1/(2*R) below, sag()
// presumably evaluates (x^2+y^2)/(2R) -- confirm in the implementation.
class Paraboloid : public Surface {
public:
// R: radius of curvature at the vertex.
Paraboloid(double R);
~Paraboloid();
// Device-side copy of this surface (used for GPU builds).
virtual const Surface* getDevPtr() const override;
// Surface height z at transverse coordinates (x, y).
virtual double sag(double, double) const override;
// Surface normal at (x, y), returned through nx, ny, nz.
virtual void normal(
double x, double y,
double& nx, double& ny, double& nz
) const override;
// Time dt for a ray at (x,y,z) with velocity (vx,vy,vz) to reach the
// surface; NOTE(review): presumably returns false when there is no
// intersection -- confirm.
virtual bool timeToIntersect(
double x, double y, double z,
double vx, double vy, double vz,
double& dt
) const override;
private:
const double _R; // Radius of curvature
const double _Rinv; // 1/R
const double _2Rinv; // 1/(2*R)
};
#if defined(BATOID_GPU)
#pragma omp end declare target
#endif
}
#endif
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Compute a gamma from the image mean so that the mean maps toward
  mid-gray (gamma = log(mean)/log(0.5)), then apply it via LevelImage().
  With the default channel mask one correction covers all channels;
  otherwise each updatable channel is corrected independently.
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
ExceptionInfo *exception)
{
double
gamma,
log_mean,
mean,
sans;  /* receives GetImageMean()'s second output; value is discarded */
MagickStatusType
status;
register ssize_t
i;
log_mean=log(0.5);
if (image->channel_mask == DefaultChannels)
{
/*
Apply gamma correction equally across all given channels.
*/
(void) GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
}
/*
Auto-gamma each channel separately.
*/
status=MagickTrue;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ChannelType
channel_mask;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
/* Temporarily restrict the mask to this one channel; restored below. */
channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
status=GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
(void) SetImageChannelMask(image,channel_mask);
/* A failure in either call above aborts the remaining channels. */
if (status == MagickFalse)
break;
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Delegate to MinMaxStretchImage. NOTE(review): the 0.0,0.0,1.0
   arguments are presumed to mean no black/white-point clipping and unit
   gamma -- confirm against MinMaxStretchImage's signature. */
MagickExport MagickBooleanType AutoLevelImage(Image *image,
ExceptionInfo *exception)
{
return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Convert the user-facing brightness/contrast percentages (-100..100)
  into the slope and intercept of a linear transfer function, then apply
  it through FunctionImage() as a degree-1 polynomial.
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag "BrightnessContast/Image"
double
coefficients[2],
intercept,
slope;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Contrast maps through tan() so that contrast == 0 yields slope 1.0;
a negative slope is clamped to zero.
*/
slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
if (slope < 0.0)
slope=0.0;
intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
coefficients[0]=slope;
coefficients[1]=intercept;
status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Closed intensity interval [min, max] over 16-bit gray levels. */
typedef struct _RangeInfo
{
unsigned short
min,
max;
} RangeInfo;
/*
  Clip each histogram bin at clip_limit and redistribute the clipped
  (excess) counts uniformly over the bins, then hand out any remainder a
  count at a time. Keeping the total count constant is what bounds the
  contrast amplification in CLAHE.
*/
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
size_t *histogram)
{
#define NumberCLAHEGrays (65536)
register ssize_t
i;
size_t
cumulative_excess,
previous_excess,
step;
ssize_t
excess;
/*
Compute total number of excess pixels.
*/
cumulative_excess=0;
for (i=0; i < (ssize_t) number_bins; i++)
{
excess=(ssize_t) histogram[i]-(ssize_t) clip_limit;
if (excess > 0)
cumulative_excess+=excess;
}
/*
Clip histogram and redistribute excess pixels across all bins.
*/
step=cumulative_excess/number_bins;
/* Bins already within (clip_limit-step) of the limit absorb only their
   own headroom; all others absorb a full step. */
excess=(ssize_t) (clip_limit-step);
for (i=0; i < (ssize_t) number_bins; i++)
{
if ((double) histogram[i] > clip_limit)
histogram[i]=(size_t) clip_limit;
else
if ((ssize_t) histogram[i] > excess)
{
cumulative_excess-=histogram[i]-excess;
histogram[i]=(size_t) clip_limit;
}
else
{
cumulative_excess-=step;
histogram[i]+=step;
}
}
/*
Redistribute remaining excess.
*/
do
{
register size_t
*p;
size_t
*q;
previous_excess=cumulative_excess;
p=histogram;
q=histogram+number_bins;
while ((cumulative_excess != 0) && (p < q))
{
/* Spread one count per visited bin, stepping wider when little
   excess remains so the additions stay evenly distributed. */
step=number_bins/cumulative_excess;
if (step < 1)
step=1;
for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step)
if ((double) *p < clip_limit)
{
(*p)++;
cumulative_excess--;
}
p++;
}
/* Stop when nothing is left or no bin could accept a count (all at
   the limit), which would otherwise loop forever. */
} while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}
/*
  Build a gray-level histogram for one tile: zero the bins, then push
  every tile pixel through the lut. Tile rows are tile_info->width wide
  but stored clahe_info->width apart in the padded pixel buffer.
*/
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const size_t number_bins,
const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_bins; i++)
histogram[i]=0L;
for (i=0; i < (ssize_t) tile_info->height; i++)
{
register const unsigned short
*row;
register ssize_t
j;
row=pixels+i*(ssize_t) clahe_info->width;
for (j=0; j < (ssize_t) tile_info->width; j++)
histogram[lut[row[j]]]++;
}
}
/*
  Rewrite one tile of pixels in place by bilinearly blending the four
  surrounding tile mappings (Q11/Q12/Q21/Q22), weighted by the pixel's
  position inside the tile; this removes visible tile-boundary seams.
  After each row the pointer skips the padding out to clahe_info->width.
*/
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
const size_t *Q22,const size_t *Q11,const size_t *Q21,
const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
ssize_t
y;
unsigned short
intensity;
/*
Bilinear interpolate four tiles to eliminate boundary artifacts.
*/
for (y=(ssize_t) tile->height; y > 0; y--)
{
register ssize_t
x;
for (x=(ssize_t) tile->width; x > 0; x--)
{
intensity=lut[*pixels];
/* Weighted average of the four mappings, normalized by the tile
   area (PerceptibleReciprocal guards a zero-area tile). */
*pixels++=(unsigned short ) (PerceptibleReciprocal((double) tile->width*
tile->height)*(y*(x*Q12[intensity]+(tile->width-x)*Q22[intensity])+
(tile->height-y)*(x*Q11[intensity]+(tile->width-x)*Q21[intensity])));
}
pixels+=(clahe_info->width-tile->width);
}
}
/*
  Build the lookup table mapping each input gray level in
  [range_info->min, range_info->max] onto a histogram bin index in
  [0, number_bins-1] by integer division with a fixed bin width.
*/
static void GenerateCLAHELut(const RangeInfo *range_info,
const size_t number_bins,unsigned short *lut)
{
ssize_t
intensity;
unsigned short
bin_width;
bin_width=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
for (intensity=(ssize_t) range_info->min;
     intensity <= (ssize_t) range_info->max; intensity++)
lut[intensity]=(unsigned short) ((intensity-range_info->min)/bin_width);
}
/*
  Convert the histogram into a cumulative-distribution lookup rescaled
  onto the intensity range [range_info->min, range_info->max]; each
  entry is clamped at the maximum.
*/
static void MapCLAHEHistogram(const RangeInfo *range_info,
const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
double
cumulative,
scale;
register ssize_t
i;
scale=(double) (range_info->max-range_info->min)/number_pixels;
cumulative=0.0;
for (i=0; i < (ssize_t) number_bins; i++)
{
cumulative+=histogram[i];
histogram[i]=(size_t) (range_info->min+scale*cumulative);
if (histogram[i] > range_info->max)
histogram[i]=range_info->max;
}
}
/*
  Core CLAHE pass over a padded pixel buffer. clahe_info->x/y hold the
  tile-grid dimensions and clahe_info->width/height the padded buffer
  size. Pass one builds, clips, and maps a histogram per tile; pass two
  rewrites the pixels by bilinear interpolation between neighboring tile
  mappings. Returns MagickFalse only on allocation failure.
*/
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const RangeInfo *range_info,
const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
MemoryInfo
*tile_cache;
register unsigned short
*p;
size_t
limit,
*tiles;
ssize_t
y;
unsigned short
lut[NumberCLAHEGrays];
/*
Contrast limited adaptive histogram equalization.
*/
if (clip_limit == 1.0)
return(MagickTrue);
tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y,
number_bins*sizeof(*tiles));
if (tile_cache == (MemoryInfo *) NULL)
return(MagickFalse);
tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
/* Translate the relative clip limit into an absolute per-bin count. */
limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
if (limit < 1UL)
limit=1UL;
/*
Generate greylevel mappings for each tile.
*/
GenerateCLAHELut(range_info,number_bins,lut);
p=pixels;
for (y=0; y < (ssize_t) clahe_info->y; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) clahe_info->x; x++)
{
size_t
*histogram;
histogram=tiles+(number_bins*(y*clahe_info->x+x));
GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
ClipCLAHEHistogram((double) limit,number_bins,histogram);
MapCLAHEHistogram(range_info,number_bins,tile_info->width*
tile_info->height,histogram);
p+=tile_info->width;
}
p+=clahe_info->width*(tile_info->height-1);
}
/*
Interpolate greylevel mappings to get CLAHE image.
*/
p=pixels;
/* One extra iteration per axis: border rows/columns use half tiles and
   clamp to the nearest mapping (tile == offset at the edges). */
for (y=0; y <= (ssize_t) clahe_info->y; y++)
{
OffsetInfo
offset;
RectangleInfo
tile;
register ssize_t
x;
tile.height=tile_info->height;
tile.y=y-1;
offset.y=tile.y+1;
if (y == 0)
{
/*
Top row.
*/
tile.height=tile_info->height >> 1;
tile.y=0;
offset.y=0;
}
else
if (y == (ssize_t) clahe_info->y)
{
/*
Bottom row.
*/
tile.height=(tile_info->height+1) >> 1;
tile.y=clahe_info->y-1;
offset.y=tile.y;
}
for (x=0; x <= (ssize_t) clahe_info->x; x++)
{
tile.width=tile_info->width;
tile.x=x-1;
offset.x=tile.x+1;
if (x == 0)
{
/*
Left column.
*/
tile.width=tile_info->width >> 1;
tile.x=0;
offset.x=0;
}
else
if (x == (ssize_t) clahe_info->x)
{
/*
Right column.
*/
tile.width=(tile_info->width+1) >> 1;
tile.x=clahe_info->x-1;
offset.x=tile.x;
}
InterpolateCLAHE(clahe_info,
tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */
tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */
tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */
tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */
&tile,lut,p);
p+=tile.width;
}
p+=clahe_info->width*(tile.height-1);
}
tile_cache=RelinquishVirtualMemory(tile_cache);
return(MagickTrue);
}
/*
  Public CLAHE entry point: pad the image out to a whole number of
  tiles, transform to Lab, run CLAHE() on the L channel (the first
  channel of each pixel), copy the result back, and restore the original
  colorspace.
*/
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
const size_t height,const size_t number_bins,const double clip_limit,
ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
MagickBooleanType
status;
MagickOffsetType
progress;
MemoryInfo
*pixel_cache;
RangeInfo
range_info;
RectangleInfo
clahe_info,
tile_info;
size_t
n;
ssize_t
y;
unsigned short
*pixels;
/*
Configure CLAHE parameters.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
range_info.min=0;
range_info.max=NumberCLAHEGrays-1;
/* Default tile size: 1/8 of each image dimension. */
tile_info.width=width;
if (tile_info.width == 0)
tile_info.width=image->columns >> 3;
tile_info.height=height;
if (tile_info.height == 0)
tile_info.height=image->rows >> 3;
/* tile_info.x/y = padding needed to round up to whole tiles;
   clahe_info.x/y = resulting tile-grid dimensions. */
tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
clahe_info.width=image->columns+tile_info.x;
clahe_info.height=image->rows+tile_info.y;
clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
sizeof(*pixels));
if (pixel_cache == (MemoryInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
colorspace=image->colorspace;
if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
{
pixel_cache=RelinquishVirtualMemory(pixel_cache);
return(MagickFalse);
}
/*
Initialize CLAHE pixels.
*/
image_view=AcquireVirtualCacheView(image,exception);
progress=0;
status=MagickTrue;
n=0;
/* Read the padded region centered on the image; the virtual cache view
   supplies the out-of-bounds border pixels. */
for (y=0; y < (ssize_t) clahe_info.height; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
(tile_info.y >> 1),clahe_info.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) clahe_info.width; x++)
{
/* p[0] is the first channel of the pixel (L in Lab). */
pixels[n++]=ScaleQuantumToShort(p[0]);
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
/* NOTE(review): the progress span 2*GetPixelChannels(image) looks
   inconsistent with progress counting rows; 2*image->rows seems
   intended -- confirm against upstream. */
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
(size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
if (status == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
/*
Push CLAHE pixels to CLAHE image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
/* Skip the top and left halves of the padding when copying back. */
n=clahe_info.width*(tile_info.y >> 1);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
n+=tile_info.x >> 1;
for (x=0; x < (ssize_t) image->columns; x++)
{
q[0]=ScaleShortToQuantum(pixels[n++]);
q+=GetPixelChannels(image);
}
n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
pixel_cache=RelinquishVirtualMemory(pixel_cache);
if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
status=MagickFalse;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Replace each channel value with a lookup into clut_image: a MaxMap+1
  entry table is first sampled along the CLUT image's diagonal, then
  every updatable channel of every pixel is indexed through that table.
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"
CacheView
*clut_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*clut_map;
register ssize_t
i;
ssize_t adjust,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clut_image != (Image *) NULL);
assert(clut_image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/* A color CLUT applied to a gray image promotes the image to sRGB. */
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsGrayColorspace(clut_image->colorspace) == MagickFalse))
(void) SetImageColorspace(image,sRGBColorspace,exception);
clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
if (clut_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Clut image.
*/
status=MagickTrue;
progress=0;
/* Integer interpolation samples cell centers; other methods pull the
   last sample inside the image by one pixel. */
adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
clut_view=AcquireVirtualCacheView(clut_image,exception);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
GetPixelInfo(clut_image,clut_map+i);
status=InterpolatePixelInfo(clut_image,clut_view,method,
(double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
if (status == MagickFalse)
break;
}
clut_view=DestroyCacheView(clut_view);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelTrait
traits;
GetPixelInfoPixel(image,q,&pixel);
/* Each channel is remapped only if its traits request updates. */
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.red))].red;
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.green))].green;
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.blue))].blue;
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.black))].black;
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.alpha))].alpha;
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
/* Enable the image's alpha channel if the CLUT carries alpha and the
   alpha channel was among the updated channels. */
if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
(void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Parse an ASC CDL Color Correction Collection (XML) and apply the
  slope/offset/power transfer per RGB channel, blended with luma by the
  saturation factor. Only the first <ColorCorrection> element is used.
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MagickPathExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*cdl_map;
register ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
ccc=NewXMLTree((const char *) color_correction_collection,exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
/* Identity defaults for any element missing from the XML. */
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
/* Each SOP element holds three whitespace/comma separated values in
   R, G, B order. */
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
GetNextToken(p,&p,MagickPathExtent,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.saturation: %g",color_correction.saturation);
}
/* Precompute the per-channel transfer pow(slope*v+offset, power) as a
   MaxMap+1 entry lookup table. */
cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power))));
cdl_map[i].green=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power))));
cdl_map[i].blue=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power))));
}
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Apply transfer function to colormap.
*/
double
luma;
luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
0.07217f*image->colormap[i].blue;
image->colormap[i].red=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma;
image->colormap[i].green=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma;
image->colormap[i].blue=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma;
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Rec.709-style luma; saturation blends between luma (0.0) and the
   fully corrected channel value (1.0). */
luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
0.07217f*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to MagickTrue to increase the
% image contrast, otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Adjust the brightness of a single color in place: with sign > 0 dark
  colors become darker and light colors become lighter; with sign < 0 the
  contrast is flattened instead.  The color is converted to HSB, the
  brightness component is pushed along a sinusoidal curve and clamped to
  [0,1], then the result is converted back to RGB.
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness = 0.0,
    hue = 0.0,
    saturation = 0.0;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /*
    Move brightness toward the S-curve value; the sine term maps [0,1]
    onto itself with a steeper slope around the midpoint.
  */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness < 0.0)
    brightness=0.0;
  else
    if (brightness > 1.0)
      brightness=1.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"
  CacheView
    *image_view;
  int
    sign;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated implementation when it succeeds; fall
    through to the CPU path otherwise.
  */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    sign selects the direction of the adjustment applied by Contrast():
    +1 increases contrast, -1 reduces it.
  */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;
        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.  Rows are processed independently, one row of
    authentic pixels per iteration.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress is shared across OpenMP threads; guard the increment. */
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
%    o black_point, white_point: the black and white points have the range
%      of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
  CacheView
    *image_view;
  double
    *black,
    *histogram,
    *stretch_map,
    *white;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure.
      */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;
      /*
        With the default channel mask every channel is binned by the pixel
        intensity; with a restricted mask each channel is binned by its
        own value.
      */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;
    register ssize_t
      j;
    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /*
      Black level: first bin at which the cumulative count exceeds
      black_point.
    */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /*
      White level: accumulate downward from the top of the histogram until
      the count exceeds (pixels - white_point).
    */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;
      /*
        Linear ramp: bins at or below the black level map to 0, bins at or
        above the white level map to QuantumRange, and bins in between are
        scaled by 1/(white-black).
      */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;
      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* A degenerate range (black == white) leaves the channel as-is. */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag "Enhance/Image"
/*
  Examine the neighbor pixel at r: a weighted color distance to the center
  pixel (held in `pixel') is accumulated over the red, green, blue, black,
  and alpha channels; when the distance is below the 0.069 threshold the
  neighbor contributes `weight' to the running aggregate.  Always advances
  r by one pixel.  Relies on locals (mean, distance, distance_squared,
  aggregate, total_weight, pixel, r) declared in the pixel loop below.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);
  CacheView
    *enhance_view,
    *image_view;
  Image
    *enhance_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    ssize_t
      center;
    if (status == MagickFalse)
      continue;
    /*
      Fetch a 5-row window of virtual pixels starting 2 rows above and 2
      columns left of the current row so every output pixel can see its
      full 5x5 neighborhood (edges resolved by the virtual pixel policy).
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Offset of the current pixel inside the window: 2 rows down and 2
      columns in, in channel units.
    */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;
      PixelInfo
        aggregate;
      register const Quantum
        *magick_restrict r;
      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /*
        Walk the 5x5 neighborhood row by row with a center-weighted
        kernel; each EnhancePixel call advances r by one pixel.
      */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /*
            Replace the center pixel with the weighted mean of the
            accepted neighbors; the +total_weight/2 term rounds to
            nearest.
          */
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag "Equalize/Image"
  CacheView
    *image_view;
  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it succeeds. */
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure.
      */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;
        /*
          When channels are synced all channels are binned by the pixel
          intensity; otherwise each channel is binned by its own value.
        */
        intensity=(double) p[i];
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;
    register ssize_t
      j;
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the entire black/white arrays.  The previous sizeof(*black)/
    sizeof(*white) cleared only the first double of each array.
  */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;
    /*
      black/white hold the cumulative counts at the histogram extremes;
      the equalize map linearly rescales the CDF between them.
    */
    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;
      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Skip non-updatable channels and degenerate (flat) histograms. */
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o gamma: the image gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag "GammaCorrect/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  Quantum
    *gamma_map;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A gamma of 1.0 is the identity transform: nothing to do. */
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    Build the lookup table: map[i] = (i/MaxMap)^(1/gamma) rescaled to the
    quantum range.  A gamma of 0.0 leaves the table all zeros.
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress, image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /*
    Track the cumulative gamma applied to the image.
  */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Colormapped images are synced and promoted to DirectClass so that every
    pixel can be rewritten independently below.
  */
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated path first; if it succeeds only the image
    metadata (intensity, type, colorspace) remains to be updated here.
  */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image: compute an intensity per pixel and store it in the gray
    channel, row by row (rows processed in parallel when OpenMP is enabled).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a failure in another row short-circuits the remaining rows */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          /* arithmetic mean of the RGB components */
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* maximum component (HSB brightness) */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* midpoint of the min and max components (HSL lightness) */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* mean of the squared components */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /*
            Rec. 601 luma weights expect non-linear components: linear RGB
            input is gamma-encoded first.
          */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /*
            Rec. 601 luminance weights expect linear components: sRGB input
            is gamma-decoded first.
          */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          /* Rec. 709 luma (the default method); encode linear RGB first */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          /* Rec. 709 luminance; decode sRGB to linear first */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* root-mean-square of the components */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Record how the intensity was derived and convert the colorspace: the
    luminance methods produce linear gray, the others non-linear gray.
  */
  image->intensity=method;
  image->type=GrayscaleType;
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag "Clut/Image"

  /* a point inside the Hald color cube (fractional cube coordinates) */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /*
    Derive the cube geometry from the CLUT image size: find the smallest
    order whose cube covers the shorter CLUT dimension, then square it to
    get the cube edge (level) and square again for one full blue slice
    (cube_size).
  */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map the pixel's RGB to cube coordinates in [0,level-1], then split
        into a linear offset into the CLUT (integer part) and fractional
        parts used as interpolation weights.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Sample two CLUT entries one green step (level) apart and blend by
        the fractional green coordinate.
      */
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      /*
        Repeat on the next blue slice (cube_size further on), then blend the
        two slice results by the fractional blue coordinate.
      */
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      /* write back only the channels marked for update */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  /*
    Map pixel so that black_point lands on 0 and white_point on QuantumRange,
    raising the normalized value to 1/gamma.  PerceptibleReciprocal() guards
    against a degenerate (near-zero) black-to-white span.
  */
  double
    normalized;

  normalized=PerceptibleReciprocal(white_point-black_point)*
    ((double) pixel-black_point);
  return(QuantumRange*gamma_pow(normalized,1.0/gamma));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    For colormapped images, level the colormap entries directly; only the
    channels flagged for update are touched.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image: apply LevelPixel() to every updatable channel of every
    pixel (rows processed in parallel when OpenMP is enabled).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be invoked by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
/*
  Inverse of LevelPixel(): gamma-adjust the normalized value, then compress
  it into the [black_point,white_point] range.
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    For colormapped images, levelize the colormap entries directly; only the
    channels flagged for update are touched.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image: apply LevelizeValue() to every updatable channel of every
    pixel (rows processed in parallel when OpenMP is enabled).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
const PixelInfo *black_color,const PixelInfo *white_color,
const MagickBooleanType invert,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickStatusType
status;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
(IsGrayColorspace(white_color->colorspace) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
if (invert == MagickFalse)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
else
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram: bin the intensity of every pixel into MaxMap+1 buckets.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point
    levels: accumulate bin counts from the dark end until black_point pixels
    are covered, and from the bright end until white_point pixels are
    covered.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the located [black,white] range to the full quantum range.
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chrome, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in HCL space: scale luma and chroma by their
    percentages (100 == unchanged) and shift the hue by a wrapped fraction
    of the 100-centered percent_hue.
  */
  double
    chroma,
    hue,
    luma;

  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in HCLp space: scale luma and chroma by their
    percentages (100 == unchanged) and shift the hue by a wrapped fraction
    of the 100-centered percent_hue.
  */
  double
    chroma,
    hue,
    luma;

  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in HSB space: scale brightness and saturation by
    their percentages (100 == unchanged) and shift the hue by a wrapped
    fraction of the 100-centered percent_hue.
  */
  double
    brightness,
    hue,
    saturation;

  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness*=0.01*percent_brightness;
  saturation*=0.01*percent_saturation;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in HSI space: scale intensity and saturation by
    their percentages (100 == unchanged) and shift the hue by a wrapped
    fraction of the 100-centered percent_hue.
  */
  double
    hue,
    intensity,
    saturation;

  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  intensity*=0.01*percent_intensity;
  saturation*=0.01*percent_saturation;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in HSL space: scale lightness and saturation by
    their percentages (100 == unchanged) and shift the hue by a wrapped
    fraction of the 100-centered percent_hue.
  */
  double
    hue,
    lightness,
    saturation;

  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  lightness*=0.01*percent_lightness;
  saturation*=0.01*percent_saturation;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in HSV space: scale value and saturation by their
    percentages (100 == unchanged) and shift the hue by a wrapped fraction
    of the 100-centered percent_hue.
  */
  double
    hue,
    saturation,
    value;

  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  value*=0.01*percent_value;
  saturation*=0.01*percent_saturation;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in HWB space: scale whiteness and blackness by their
    percentages (100 == unchanged) and shift the hue by a wrapped fraction
    of the 100-centered percent_hue.
  */
  double
    blackness,
    hue,
    whiteness;

  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  whiteness*=0.01*percent_whiteness;
  blackness*=0.01*percent_blackness;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in LCHab space: scale luma and chroma by their
    percentages (100 == unchanged) and shift the hue by a wrapped fraction
    of the 100-centered percent_hue.
  */
  double
    chroma,
    hue,
    luma;

  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  /*
    Modulate the pixel in LCHuv space: scale luma and chroma by their
    percentages (100 == unchanged) and shift the hue by a wrapped fraction
    of the 100-centered percent_hue.
  */
  double
    chroma,
    hue,
    luma;

  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
const char
*artifact;
double
percent_brightness,
percent_hue,
percent_saturation;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
register ssize_t
i;
ssize_t
y;
/*
Initialize modulate table.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (modulate == (char *) NULL)
return(MagickFalse);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
flags=ParseGeometry(modulate,&geometry_info);
percent_brightness=geometry_info.rho;
percent_saturation=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
percent_saturation=100.0;
percent_hue=geometry_info.xi;
if ((flags & XiValue) == 0)
percent_hue=100.0;
colorspace=UndefinedColorspace;
artifact=GetImageArtifact(image,"modulate:colorspace");
if (artifact != (const char *) NULL)
colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
MagickFalse,artifact);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
double
blue,
green,
red;
/*
Modulate image colormap.
*/
red=(double) image->colormap[i].red;
green=(double) image->colormap[i].green;
blue=(double) image->colormap[i].blue;
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSIColorspace:
{
ModulateHSI(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
image->colormap[i].red=red;
image->colormap[i].green=green;
image->colormap[i].blue=blue;
}
/*
Modulate image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateModulateImage(image,percent_brightness,percent_hue,
percent_saturation,colorspace,exception) != MagickFalse)
return(MagickTrue);
#endif
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag  "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap.  When grayscale is requested, only gray entries
        (red == green == blue) are negated; all others are left untouched.
      */
      if( grayscale != MagickFalse )
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if( grayscale != MagickFalse )
    {
      /*
        Grayscale-only path: negate just the pixels that are gray, matching
        the colormap handling above.  Parallelized like the sibling loops in
        this file (the progress counter below is already updated atomically).
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j;

          /*
            Skip non-gray pixels: only grayscale values are negated.  (The
            original test was inverted, negating everything EXCEPT gray.)
          */
          if (IsPixelGray(image,q) == MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /* Propagate any row failure instead of unconditionally succeeding. */
      return(status);
    }
  /*
    Negate image (all pixels, every updatable channel).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white (the black_point/white_point ranks computed below).
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    pixels;

  /*
    Delegate to ContrastStretchImage() with fixed stretch ranks derived from
    the total pixel count: pixels below the 0.15% rank saturate to black and
    pixels above the 99.95% rank saturate to white.
  */
  pixels=(double) image->columns*image->rows;
  return(ContrastStretchImage(image,pixels*0.0015,pixels*0.9995,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal (see the comment block above): map x back
  through the affine scaling, clamp into the open domain of the inverse
  sigmoidal, then apply atanh (tanh build) or the logit (exp build).
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;

#if defined(MAGICKCORE_HAVE_ATANH)
  /* atanh is defined on (-1,1): keep a MagickEpsilon margin from both ends. */
  double bounded=argument;

  if (bounded < -1+MagickEpsilon)
    bounded=(-1+MagickEpsilon);
  else
    if (bounded > 1-MagickEpsilon)
      bounded=1-MagickEpsilon;
  return(b+(2.0/a)*atanh(bounded));
#else
  /* The logit is defined on (0,1): clamp away from both endpoints. */
  double bounded=argument;

  if (bounded < MagickEpsilon)
    bounded=MagickEpsilon;
  else
    if (bounded > 1-MagickEpsilon)
      bounded=1-MagickEpsilon;
  return(b-log(1.0/bounded-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"
/* Convenience macros: forward/inverse scaled sigmoidal mapped to the
   Quantum range and clamped. */
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Check pre-conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done.
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap: apply the forward mapping when
    sharpen is set, the inverse otherwise, to every updatable channel.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image: one OpenMP task per row; a failed row
    flips status and the remaining rows bail out early.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
NeighborhoodGraph.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"
#include <chrono>
namespace SPTAG
{
namespace COMMON
{
        // Abstract base for a k-nearest-neighborhood graph over the vectors of a
        // VectorIndex.  Construction: TP-tree partitions propose candidate pairs
        // (BuildGraph), then search-based refinement prunes each node's list
        // (RefineGraph/RefineNode).  Concrete subclasses supply the neighbor
        // insertion/rebuild policy (e.g. RNG rule) via the pure virtuals below.
        class NeighborhoodGraph
        {
        public:
            NeighborhoodGraph(): m_iTPTNumber(32),
                m_iTPTLeafSize(2000),
                m_iSamples(1000),
                m_numTopDimensionTPTSplit(5),
                m_iNeighborhoodSize(32),
                m_iNeighborhoodScale(2),
                m_iCEFScale(2),
                m_iRefineIter(2),
                m_iCEF(1000),
                m_iAddCEF(500),
                m_iMaxCheckForRefineGraph(10000)
            {}

            ~NeighborhoodGraph() {}

            // Insert insertNode (at distance insertDist) into node's neighbor list.
            virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;

            // Rebuild node's neighbor list from the given search results.
            virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;

            // Estimate graph recall by sampling `samples` nodes.
            virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) = 0;

            // Build the graph for all vectors in `index`.  Temporarily enlarges the
            // neighborhood by m_iNeighborhoodScale, seeds candidates with m_iTPTNumber
            // random TP-trees (all-pairs distances inside each leaf), then refines.
            // `idmap` optionally remaps vector ids before neighbors are recorded.
            template <typename T>
            void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                LOG(Helper::LogLevel::LL_Info, "build RNG graph!\n");

                m_iGraphSize = index->GetNumSamples();
                m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale;
                m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize);

                // Small graphs skip the TP-tree candidate stage entirely.
                if (m_iGraphSize < 1000) {
                    RefineGraph<T>(index, idmap);
                    LOG(Helper::LogLevel::LL_Info, "Build RNG Graph end!\n");
                    return;
                }

                {
                    // Per-node current neighbor distances, initialized to MaxDist.
                    COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize);
                    std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
                    std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());

                    for (SizeType i = 0; i < m_iGraphSize; i++)
                        for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                            (NeighborhoodDists)[i][j] = MaxDist;

                    auto t1 = std::chrono::high_resolution_clock::now();
                    LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition begin\n");
#pragma omp parallel for schedule(dynamic)
                    for (int i = 0; i < m_iTPTNumber; i++)
                    {
                        // Stagger threads and reseed so each tree gets a different shuffle.
                        // NOTE(review): rand()/std::random_shuffle share global state across
                        // threads; trees are randomized but not reproducible — verify intent.
                        Sleep(i * 100); std::srand(clock());
                        for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
                        std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
                        PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
                        LOG(Helper::LogLevel::LL_Info, "Finish Getting Leaves for Tree %d\n", i);
                    }
                    LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition done\n");
                    auto t2 = std::chrono::high_resolution_clock::now();
                    LOG(Helper::LogLevel::LL_Info, "Build TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());

                    // For every leaf of every tree, add all intra-leaf pairs as
                    // neighbor candidates (kept only if they beat existing distances).
                    for (int i = 0; i < m_iTPTNumber; i++)
                    {
#pragma omp parallel for schedule(dynamic)
                        for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
                        {
                            SizeType start_index = TptreeLeafNodes[i][j].first;
                            SizeType end_index = TptreeLeafNodes[i][j].second;
                            if ((j * 5) % TptreeLeafNodes[i].size() == 0) LOG(Helper::LogLevel::LL_Info, "Processing Tree %d %d%%\n", i, static_cast<int>(j * 1.0 / TptreeLeafNodes[i].size() * 100));
                            for (SizeType x = start_index; x < end_index; x++)
                            {
                                for (SizeType y = x + 1; y <= end_index; y++)
                                {
                                    SizeType p1 = TptreeDataIndices[i][x];
                                    SizeType p2 = TptreeDataIndices[i][y];
                                    float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
                                    if (idmap != nullptr) {
                                        p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
                                        p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
                                    }
                                    COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
                                    COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
                                }
                            }
                        }
                        TptreeDataIndices[i].clear();
                        TptreeLeafNodes[i].clear();
                    }
                    TptreeDataIndices.clear();
                    TptreeLeafNodes.clear();
                    auto t3 = std::chrono::high_resolution_clock::now();
                    LOG(Helper::LogLevel::LL_Info, "Process TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count());
                }
                RefineGraph<T>(index, idmap);
            }

            // In-place refinement: m_iRefineIter-1 wide passes (CEF scaled up), then a
            // final pass at the target neighborhood size (scale divided back out).
            // Negative idmap keys encode deleted/remapped tail links written into the
            // last neighbor slot.
            template <typename T>
            void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                for (int iter = 0; iter < m_iRefineIter - 1; iter++)
                {
                    auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
                    for (SizeType i = 0; i < m_iGraphSize; i++)
                    {
                        RefineNode<T>(index, i, false, false, m_iCEF * m_iCEFScale);
                        if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", iter, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
                    }
                    auto t2 = std::chrono::high_resolution_clock::now();
                    LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
                }

                // Shrink back to the final neighborhood size for the last pass.
                m_iNeighborhoodSize /= m_iNeighborhoodScale;

                auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < m_iGraphSize; i++)
                {
                    RefineNode<T>(index, i, false, false, m_iCEF);
                    if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", m_iRefineIter - 1, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
                }
                auto t2 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));

                if (idmap != nullptr) {
                    for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
                        if (iter->first < 0)
                        {
                            m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
                        }
                }
            }

            // Refinement into a NEW graph over the subset `indices` (with
            // reverseIndices mapping old ids to subset positions); optionally saves
            // the result to `output`.  If newGraph is null a temporary instance of
            // the same concrete type is used.
            template <typename T>
            ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices,
                std::shared_ptr<Helper::DiskPriorityIO> output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                std::shared_ptr<NeighborhoodGraph> tmp;
                if (newGraph == nullptr) {
                    tmp = NeighborhoodGraph::CreateInstance(Type());
                    newGraph = tmp.get();
                }

                SizeType R = (SizeType)indices.size();
                newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize);
                newGraph->m_iGraphSize = R;
                newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;

#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < R; i++)
                {
                    // NOTE(review): "%f" does not match the int argument produced by
                    // static_cast<int> — format specifier looks wrong; verify upstream.
                    if (i % 1000 == 0) LOG(Helper::LogLevel::LL_Info, "\rRefine %f%", static_cast<int>(i * 1.0 / R * 100));

                    SizeType *outnodes = newGraph->m_pNeighborhoodGraph[i];

                    COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1);
                    index->RefineSearchIndex(query, false);
                    RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1);

                    // Translate neighbor ids into the subset's id space, then apply idmap.
                    std::unordered_map<SizeType, SizeType>::const_iterator iter;
                    for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                    {
                        if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]];
                        if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
                    }
                    if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
                        outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
                }

                if (output != nullptr) newGraph->SaveGraph(output);
                return ErrorCode::Success;
            }

            // Re-search around `node` with candidate budget CEF and rebuild its
            // neighbor list; optionally pushes `node` into its neighbors' lists too.
            template <typename T>
            void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF)
            {
                COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1);
                index->RefineSearchIndex(query, searchDeleted);
                RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1);

                if (updateNeighbors) {
                    // update neighbors
                    for (int j = 0; j <= CEF; j++)
                    {
                        BasicResult* item = query.GetResult(j);
                        if (item->VID < 0) break;
                        if (item->VID == node) continue;

                        InsertNeighbors(index, item->VID, node, item->Dist);
                    }
                }
            }

            // Recursively split indices[first..last] by a random projection onto the
            // m_numTopDimensionTPTSplit highest-variance dimensions (best of 100
            // random weight vectors, estimated on up to m_iSamples points); emits
            // [first,last] ranges of size <= m_iTPTLeafSize into `leaves`.
            template <typename T>
            void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
                std::vector<std::pair<SizeType, SizeType>> & leaves)
            {
                if (last - first <= m_iTPTLeafSize)
                {
                    leaves.emplace_back(first, last);
                }
                else
                {
                    std::vector<float> Mean(index->GetFeatureDim(), 0);

                    int iIteration = 100;
                    SizeType end = min(first + m_iSamples, last);
                    SizeType count = end - first + 1;
                    // calculate the mean of each dimension
                    for (SizeType j = first; j <= end; j++)
                    {
                        const T* v = (const T*)index->GetSample(indices[j]);
                        for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                        {
                            Mean[k] += v[k];
                        }
                    }
                    for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                    {
                        Mean[k] /= count;
                    }
                    std::vector<BasicResult> Variance;
                    Variance.reserve(index->GetFeatureDim());
                    for (DimensionType j = 0; j < index->GetFeatureDim(); j++)
                    {
                        Variance.emplace_back(j, 0.0f);
                    }
                    // calculate the variance of each dimension
                    for (SizeType j = first; j <= end; j++)
                    {
                        const T* v = (const T*)index->GetSample(indices[j]);
                        for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                        {
                            float dist = v[k] - Mean[k];
                            Variance[k].Dist += dist*dist;
                        }
                    }
                    std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
                    std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
                    std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
                    float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
                    // Seed with the single highest-variance dimension.
                    for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
                    {
                        indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
                        bestweight[i] = 0;
                    }
                    bestweight[0] = 1;
                    float bestmean = Mean[indexs[0]];

                    // Search for a random unit projection maximizing sample variance.
                    std::vector<float> Val(count);
                    for (int i = 0; i < iIteration; i++)
                    {
                        float sumweight = 0;
                        for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                        {
                            weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
                            sumweight += weight[j] * weight[j];
                        }
                        sumweight = sqrt(sumweight);
                        for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                        {
                            weight[j] /= sumweight;
                        }
                        float mean = 0;
                        for (SizeType j = 0; j < count; j++)
                        {
                            Val[j] = 0;
                            const T* v = (const T*)index->GetSample(indices[first + j]);
                            for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                            {
                                Val[j] += weight[k] * v[indexs[k]];
                            }
                            mean += Val[j];
                        }
                        mean /= count;
                        float var = 0;
                        for (SizeType j = 0; j < count; j++)
                        {
                            float dist = Val[j] - mean;
                            var += dist * dist;
                        }
                        if (var > bestvariance)
                        {
                            bestvariance = var;
                            bestmean = mean;
                            for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                            {
                                bestweight[j] = weight[j];
                            }
                        }
                    }
                    SizeType i = first;
                    SizeType j = last;
                    // decide which child one point belongs
                    while (i <= j)
                    {
                        float val = 0;
                        const T* v = (const T*)index->GetSample(indices[i]);
                        for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                        {
                            val += bestweight[k] * v[indexs[k]];
                        }
                        if (val < bestmean)
                        {
                            i++;
                        }
                        else
                        {
                            std::swap(indices[i], indices[j]);
                            j--;
                        }
                    }
                    // if all the points in the node are equal,equally split the node into 2
                    if ((i == first) || (i == last + 1))
                    {
                        i = (first + last + 1) / 2;
                    }

                    Mean.clear();
                    Variance.clear();
                    Val.clear();
                    indexs.clear();
                    weight.clear();
                    bestweight.clear();

                    PartitionByTptree<T>(index, indices, first, i - 1, leaves);
                    PartitionByTptree<T>(index, indices, i, last, leaves);
                }
            }

            // Serialized size of the graph in bytes.
            inline std::uint64_t BufferSize() const
            {
                return m_pNeighborhoodGraph.BufferSize();
            }

            ErrorCode LoadGraph(std::shared_ptr<Helper::DiskPriorityIO> input)
            {
                ErrorCode ret = ErrorCode::Success;
                if ((ret = m_pNeighborhoodGraph.Load(input)) != ErrorCode::Success) return ret;

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return ret;
            }

            ErrorCode LoadGraph(std::string sGraphFilename)
            {
                ErrorCode ret = ErrorCode::Success;
                if ((ret = m_pNeighborhoodGraph.Load(sGraphFilename)) != ErrorCode::Success) return ret;

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return ret;
            }

            ErrorCode LoadGraph(char* pGraphMemFile)
            {
                ErrorCode ret = ErrorCode::Success;
                if ((ret = m_pNeighborhoodGraph.Load(pGraphMemFile)) != ErrorCode::Success) return ret;

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return ErrorCode::Success;
            }

            ErrorCode SaveGraph(std::string sGraphFilename) const
            {
                return m_pNeighborhoodGraph.Save(sGraphFilename);
            }

            ErrorCode SaveGraph(std::shared_ptr<Helper::DiskPriorityIO> output) const
            {
                return m_pNeighborhoodGraph.Save(output);
            }

            // Grow the graph by `num` (empty) rows.
            inline ErrorCode AddBatch(SizeType num)
            {
                ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num);
                if (ret != ErrorCode::Success) return ret;

                m_iGraphSize += num;
                return ErrorCode::Success;
            }

            inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }

            inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }

            // Thread-safe single-slot update guarded by the per-row lock.
            void Update(SizeType row, DimensionType col, SizeType val) {
                std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]);
                m_pNeighborhoodGraph[row][col] = val;
            }

            inline void SetR(SizeType rows) {
                m_pNeighborhoodGraph.SetR(rows);
                m_iGraphSize = rows;
            }

            inline SizeType R() const { return m_iGraphSize; }

            inline std::string Type() const { return m_pNeighborhoodGraph.Name(); }

            // Factory: build a concrete subclass by type name.
            static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);

        protected:
            // Graph structure
            SizeType m_iGraphSize;
            COMMON::Dataset<SizeType> m_pNeighborhoodGraph;
            FineGrainedLock m_dataUpdateLock;   // per-row locks used by Update()

        public:
            int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
            DimensionType m_iNeighborhoodSize;
            int m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph;
        };
}
}
#endif
|
array_of_ptr.c | // minimal example to reveal bug in OpenMP Transformer
// when shared variable is an array of pointer.
#include<stdlib.h>
/* Deliberately minimal: the pointers in a[] are never allocated, so the
   loop body would fault at runtime.  Per the file header this exists only
   to exercise an OpenMP source transformer on a shared array-of-pointer
   variable — presumably a compile/transform-only test case; do not run. */
int main() {
  double *a[10]; // array of pointer to double
#pragma omp parallel
  {
    int i;
#pragma omp for
    /* Loop-carried read a[i-1] with write a[i][1]: the dependence pattern
       the transformer must handle for a shared pointer array. */
    for (i=1; i<10; i++) {
      a[i][1] = 2*a[i-1][2];
    }
  }
}
|
ompfor2.c | /*
loop scheduling
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
int a[20];
/*
 * Demonstrate OpenMP loop scheduling kinds (default, static, static-chunked,
 * dynamic, guided, runtime+ordered) over the iteration space
 * [lower, upper) with the given stride, printing which thread runs each
 * iteration.  Must be called from within a parallel region.
 * Returns 0 on completion (the original declared int but never returned a
 * value, which is undefined behavior if the result is ever read).
 * NOTE(review): omp_get_thread_num() is called unguarded while <omp.h> is
 * only included under _OPENMP — a non-OpenMP build will not compile; verify
 * whether a serial fallback is wanted.
 */
int foo(int lower, int upper, int stride)
{
  int i;

#pragma omp single
  printf("---------default schedule--------------\n");
#pragma omp for nowait
  for (i=lower;i<upper;i+=stride)
  {
    a[i]=i*2;
    printf("Iteration %2d is carried out by thread %2d\n",\
        i, omp_get_thread_num());
  }
/* nowait above: explicit barrier before the next experiment. */
#pragma omp barrier

#pragma omp single
  printf("---------static schedule--------------\n");
#pragma omp for schedule(static)
  for (i=lower;i<upper;i+=stride)
  {
    a[i]=i*2;
    printf("Iteration %2d is carried out by thread %2d\n",\
        i, omp_get_thread_num());
  }

#pragma omp single
  printf("---------(static,5) schedule--------------\n");
#pragma omp for schedule(static,5)
  for (i=lower;i<upper;i+=stride)
  {
    a[i]=i*2;
    printf("Iteration %2d is carried out by thread %2d\n",\
        i, omp_get_thread_num());
  }

#pragma omp single
  printf("---------(dynamic,3) schedule--------------\n");
#pragma omp for schedule(dynamic,3)
  for (i=lower;i<upper;i+=stride)
  {
    a[i]=i*2;
    printf("Iteration %2d is carried out by thread %2d\n",\
        i, omp_get_thread_num());
  }

#if 1
#pragma omp single
  printf("---------(guided) schedule--------------\n");
#pragma omp for schedule(guided)
  for (i=lower;i<upper;i+=stride)
  {
    a[i]=i*2;
    printf("Iteration %2d is carried out by thread %2d\n",\
        i, omp_get_thread_num());
  }
#endif

#pragma omp single
  printf("---------(runtime) ordered schedule--------------\n");
#pragma omp for schedule(runtime) ordered
  for (i=lower;i<upper;i+=stride)
  {
    a[i]=i*2;
    printf("Iteration %2d is carried out by thread %2d\n",\
        i, omp_get_thread_num());
  }

  /* Fix: function is declared int but had no return statement. */
  return 0;
}
/* Driver: open one parallel region, report the team size exactly once,
   then run the scheduling demo over iterations 0,3,...,18. */
int main(void)
{
#pragma omp parallel
  {
    /* A single thread announces how many threads are in the team. */
#pragma omp single
    printf ("Using %d threads.\n",omp_get_num_threads());

    foo(0, 20, 3);
  }
  return 0;
}
|
convolution_sgemm.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
#if __ARM_NEON
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u, 1, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
#else
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
#endif
{
#if __ARM_NEON
int nn_size = size >> 3;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
vst1q_f32(tmpptr, vld1q_f32(img0));
vst1q_f32(tmpptr + 4, vld1q_f32(img0 + 4));
img0 += size;
tmpptr += 8;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#else
int remain_size_start = 0;
int nn_size = size >> 2;
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __ARM_NEON
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#else
float* tmpptr = tmp.channel(i / 4);
#endif
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
#if __ARM_NEON
vst1q_f32(tmpptr, vld1q_f32(img0));
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
#endif
img0 += size;
tmpptr += 4;
}
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __ARM_NEON
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#else
float* tmpptr = tmp.channel(i / 4 + i % 4);
#endif
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
img0 += size;
tmpptr += 1;
}
}
}
}
#if __ARM_NEON
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
float* outptr4 = top_blob.channel(p + 4);
float* outptr5 = top_blob.channel(p + 5);
float* outptr6 = top_blob.channel(p + 6);
float* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[0] \n"
"dup v18.4s, v0.s[1] \n"
"dup v19.4s, v0.s[1] \n"
"dup v20.4s, v0.s[2] \n"
"dup v21.4s, v0.s[2] \n"
"dup v22.4s, v0.s[3] \n"
"dup v23.4s, v0.s[3] \n"
"dup v24.4s, v1.s[0] \n"
"dup v25.4s, v1.s[0] \n"
"dup v26.4s, v1.s[1] \n"
"dup v27.4s, v1.s[1] \n"
"dup v28.4s, v1.s[2] \n"
"dup v29.4s, v1.s[2] \n"
"dup v30.4s, v1.s[3] \n"
"dup v31.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(nn) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[1] \n"
"dup v18.4s, v0.s[2] \n"
"dup v19.4s, v0.s[3] \n"
"dup v20.4s, v1.s[0] \n"
"dup v21.4s, v1.s[1] \n"
"dup v22.4s, v1.s[2] \n"
"dup v23.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(nn) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v24.4s, v25.4s}, [%20] \n"
// inch loop
"lsr w4, %w21, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v0.4s, v8.s[0] \n"
"fmla v17.4s, v1.4s, v8.s[0] \n"
"fmla v18.4s, v2.4s, v8.s[1] \n"
"fmla v19.4s, v3.4s, v8.s[1] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v4.4s, v8.s[2] \n"
"fmla v21.4s, v5.4s, v8.s[2] \n"
"fmla v22.4s, v6.4s, v8.s[3] \n"
"fmla v23.4s, v7.4s, v8.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"fadd v16.4s, v16.4s, v20.4s \n"
"fadd v17.4s, v17.4s, v21.4s \n"
"fadd v24.4s, v24.4s, v16.4s \n"
"fadd v25.4s, v25.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #32] \n"
"ld1r {v8.4s}, [%8], #4 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v0.4s \n"
"fmla v25.4s, v8.4s, v1.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v24.s}[0],[%0], #4 \n"
"st1 {v24.s}[1],[%1], #4 \n"
"st1 {v24.s}[2],[%2], #4 \n"
"st1 {v24.s}[3],[%3], #4 \n"
"st1 {v25.s}[0],[%4], #4 \n"
"st1 {v25.s}[1],[%5], #4 \n"
"st1 {v25.s}[2],[%6], #4 \n"
"st1 {v25.s}[3],[%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(nn) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25");
}
}
#endif // __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[0] \n"
"dup v10.4s, v0.s[1] \n"
"dup v11.4s, v0.s[1] \n"
"dup v12.4s, v0.s[2] \n"
"dup v13.4s, v0.s[2] \n"
"dup v14.4s, v0.s[3] \n"
"dup v15.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[0] \n"
"vdup.f32 q10, d0[1] \n"
"vdup.f32 q11, d0[1] \n"
"vdup.f32 q12, d1[0] \n"
"vdup.f32 q13, d1[0] \n"
"vdup.f32 q14, d1[1] \n"
"vdup.f32 q15, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n" // r4 = nn >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n" // r4 = remain = nn & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
"vst1.f32 {d24-d27}, [%2 :128]! \n"
"vst1.f32 {d28-d31}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[1] \n"
"dup v10.4s, v0.s[2] \n"
"dup v11.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[1] \n"
"vdup.f32 q10, d1[0] \n"
"vdup.f32 q11, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n" // r4 = nn >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n" // r4 = remain = nn & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v12.4s}, [%12] \n"
// inch loop
"lsr w4, %w13, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v12.4s, v12.4s, v8.4s \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #32] \n"
"ld1r {v4.4s}, [%4], #4 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v12.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v12.s}[0], [%0], #4 \n"
"st1 {v12.s}[1], [%1], #4 \n"
"st1 {v12.s}[2], [%2], #4 \n"
"st1 {v12.s}[3], [%3], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12");
#else // __aarch64__
asm volatile(
"vld1.f32 {d24-d25}, [%12] \n"
// inch loop
"lsr r4, %13, #2 \n" // r4 = nn >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"0: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q12, q12, q8 \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n" // r4 = remain = nn & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #32] \n"
"vld1.f32 {d8[],d9[]}, [%4]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d24[0]}, [%0]! \n"
"vst1.f32 {d24[1]}, [%1]! \n"
"vst1.f32 {d25[0]}, [%2]! \n"
"vst1.f32 {d25[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(nn) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
}
}
remain_outch_start += nn_outch << 2;
#else
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
const float zeros[2] = {0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 4);
const float* kptr = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
float sum00 = biasptr[0];
float sum01 = biasptr[0];
float sum02 = biasptr[0];
float sum03 = biasptr[0];
float sum10 = biasptr[1];
float sum11 = biasptr[1];
float sum12 = biasptr[1];
float sum13 = biasptr[1];
int q = 0;
for (; q < nn; q++)
{
float k0 = kptr[0];
float k1 = kptr[1];
sum00 += tmpptr[0] * k0;
sum01 += tmpptr[1] * k0;
sum02 += tmpptr[2] * k0;
sum03 += tmpptr[3] * k0;
sum10 += tmpptr[0] * k1;
sum11 += tmpptr[1] * k1;
sum12 += tmpptr[2] * k1;
sum13 += tmpptr[3] * k1;
tmpptr += 4;
kptr += 2;
}
outptr0[0] = sum00;
outptr0[1] = sum01;
outptr0[2] = sum02;
outptr0[3] = sum03;
outptr1[0] = sum10;
outptr1[1] = sum11;
outptr1[2] = sum12;
outptr1[3] = sum13;
outptr0 += 4;
outptr1 += 4;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 4 + i % 4);
const float* kptr = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
float sum00 = biasptr[0];
float sum10 = biasptr[1];
int q = 0;
for (; q < nn; q++)
{
sum00 += tmpptr[0] * kptr[0];
sum10 += tmpptr[0] * kptr[1];
tmpptr++;
kptr += 2;
}
outptr0[0] = sum00;
outptr1[0] = sum10;
outptr0++;
outptr1++;
}
}
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
#if __ARM_NEON
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
"dup v9.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
"vdup.f32 q9, %6 \n"
// inch loop
"lsr r4, %7, #2 \n" // r4 = nn >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n" // r4 = remain = nn & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
#endif // __ARM_NEON
for (; i + 3 < size; i += 4)
{
#if __ARM_NEON
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif
#else
const float* tmpptr = tmp.channel(i / 4);
const float* kptr = kernel.channel(p / 2 + p % 2);
#endif // __ARM_NEON
int nn = inch * maxk; // inch always > 0
#if __ARM_NEON
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n" // w4 = nn >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n" // w4 = remain = nn & 3
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
// inch loop
"lsr r4, %7, #2 \n" // r4 = nn >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n" // r4 = remain = nn & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(nn) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8");
#endif // __aarch64__
#else
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
int q = 0;
for (; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i < size; i++)
{
#if __ARM_NEON
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __aarch64__
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif
#else // __ARM_NEON
const float* tmpptr = tmp.channel(i / 4 + i % 4);
const float* kptr = kernel.channel(p / 2 + p % 2);
#endif // __ARM_NEON
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
int q = 0;
#if __ARM_NEON
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q + 3 < nn; q += 4)
{
float32x4_t _p0 = vld1q_f32(tmpptr);
tmpptr += 4;
float32x4_t _k0 = vld1q_f32(kptr);
kptr += 4;
#if __aarch64__
_sum0 = vfmaq_f32(_sum0, _p0, _k0);
#else
_sum0 = vmlaq_f32(_sum0, _p0, _k0);
#endif
}
#if __aarch64__
sum0 += vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
sum0 += vget_lane_f32(vpadd_f32(_ss, _ss), 0);
#endif
#endif // __ARM_NEON
for (; q < nn; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
// Repack convolution weights for the im2col + sgemm path.
// Input  : _kernel in maxk-inch-outch order (maxk = kernel_w * kernel_h).
// Output : kernel_tm with output channels grouped in blocks of 8 then 4
//          then 1 (aarch64), 4 then 1 (armv7 NEON), or 2 then 1 (scalar);
//          within each block the per-tap weights of the grouped output
//          channels are interleaved so the sgemm microkernels can stream
//          them through a single pointer.
static void convolution_im2col_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 4b-4a-maxk-inch/4a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __ARM_NEON
#if __aarch64__
    // one dst channel per 8-block, per 4-block, and per leftover channel
    kernel_tm.create(32 * maxk, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4);
#endif
#else
    kernel_tm.create(2 * maxk, inch, outch / 2 + outch % 2);
#endif // __ARM_NEON

    int q = 0;
#if __ARM_NEON
#if __aarch64__
    // blocks of 8 output channels: dst channel q/8 holds the weights of
    // channels q..q+7 interleaved as [inch][maxk][8]
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const Mat k7 = kernel.channel(q + 7);

        float* g00 = kernel_tm.channel(q / 8);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);
            const float* k40 = k4.row(p);
            const float* k50 = k5.row(p);
            const float* k60 = k6.row(p);
            const float* k70 = k7.row(p);

            for (int k = 0; k < maxk; k++)
            {
                // 8 output channels' weight for the same (p, k) tap,
                // stored contiguously
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];

                g00 += 8;
            }
        }
    }
#endif // __aarch64__
    // blocks of 4 output channels
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);

#if __aarch64__
        // dst channel index skips the 8-blocks already emitted
        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
#else
        float* g00 = kernel_tm.channel(q / 4);
#endif

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];

                g00 += 4;
            }
        }
    }
#else
    // scalar fallback: pairs of output channels
    for (; q + 1 < outch; q += 2)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);

        float* g00 = kernel_tm.channel(q / 2);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];

                g00 += 2;
            }
        }
    }
#endif // __ARM_NEON
    // leftover single output channels, one dst channel each
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);

#if __ARM_NEON
#if __aarch64__
        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
        float* g00 = kernel_tm.channel(q / 4 + q % 4);
#endif
#else
        float* g00 = kernel_tm.channel(q / 2 + q % 2);
#endif

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];

                g00 += 1;
            }
        }
    }
}
// im2col + sgemm convolution driver: expand bottom_blob into a
// (size x maxk x inch) workspace -- one row of strided/dilated samples per
// kernel tap per input channel -- then run the packed GEMM
// (im2col_sgemm_neon) against the pre-transformed kernel.
static void convolution_im2col_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    {
        // gap = elements to skip from the last sample of one output row to
        // the first sample of the next (stride_h rows down, rewound by the
        // outw * stride_w samples consumed)
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // top-left input sample touched by kernel tap (u, v)
                    const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        // copy one output row of samples, unrolled 4/2/1
                        int j = 0;
                        for (; j + 3 < outw; j += 4)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];
                            ptr[2] = sptr[stride_w * 2];
                            ptr[3] = sptr[stride_w * 3];

                            sptr += stride_w * 4;
                            ptr += 4;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];

                            sptr += stride_w * 2;
                            ptr += 2;
                        }
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
utils.h | #ifndef _UTILS_
#define _UTILS_
#include <cstdio>
#include <iostream>

#include "common.h"
#include "cusparse.h"
// print 1D array
// Print the first `length` elements of a host-side array, comma-separated,
// followed by a newline.  Uses iostream formatting so every element type T
// is printed correctly; the previous printf("%i") passed non-int types
// (e.g. float, int64_t) through a varargs "%i" slot, which is undefined
// behavior and printed garbage for anything but int.
template<typename T>
void print_1darray(T *input, int length)
{
    for (int i = 0; i < length; i++)
        std::cout << input[i] << ", ";
    std::cout << '\n';
}
/*
__forceinline__ __device__
static double atomicAdd(double *addr, double val)
{
double old = *addr, assumed;
do
{
assumed = old;
old = __longlong_as_double(
atomicCAS((unsigned long long int*)addr,
__double_as_longlong(assumed),
__double_as_longlong(val+assumed)));
}while(assumed != old);
return old;
}*/
// Warp-wide sum via an XOR butterfly reduction: after log2(WARP_SIZE)
// rounds every lane holds the total of `sum` across the warp.
// All lanes of the warp must reach this call together (full-warp mask).
// Requires CUDA 9+: the legacy mask-less __shfl_xor was removed for
// Volta and later because implicit warp synchrony no longer holds.
// NOTE(review): assumes WARP_SIZE == 32 (full 0xffffffff mask) -- confirm
// against the definition in common.h.
template<typename vT>
__forceinline__ __device__
vT sum_32_shfl(vT sum)
{
    #pragma unroll
    for(int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1)
        sum += __shfl_xor_sync(0xffffffff, sum, mask);

    return sum;
}
/*struct assembly_timer {
timeval t1, t2;
struct timezone tzone;
void start() {
gettimeofday(&t1, &tzone);
}
double stop() {
gettimeofday(&t2, &tzone);
double elapsedTime = 0;
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
return elapsedTime;
}
};*/
// Report a failed cuSPARSE call.  cusparseStatus_t is an enum, so the old
// "%s" format treated the numeric status as a char* (undefined behavior);
// print the numeric status code instead.
void check_cusparse_kernel(cusparseStatus_t cudaerr)
{
    if (cudaerr != CUSPARSE_STATUS_SUCCESS)
        printf("cuda kernel fail, err = %d\n", (int)cudaerr);
}
// Exchange the values pointed to by a and b.
template<typename T>
void swap(T *a , T *b)
{
    const T first = *a;
    *a = *b;
    *b = first;
}
// quick sort key-value pair (child function)
// Quick-sort partition step over parallel key/value arrays: picks
// key[pivot_index] as the pivot, moves all smaller keys (with their
// payloads) in front of it, and returns the pivot's final index.
// NOTE(review): as in the original, the scan runs over
// key[pivot_index .. pivot_index + length - 1]; callers in this file
// always pass pivot_index == 0.
template<typename iT, typename vT>
int partition(iT *key, vT *val, int length, int pivot_index)
{
    const iT pivot = key[pivot_index];
    int store = pivot_index;  // next slot for keys smaller than the pivot

    // park the pivot (key and payload) in the last slot of the range
    iT ktmp = key[pivot_index];
    key[pivot_index] = key[pivot_index + (length - 1)];
    key[pivot_index + (length - 1)] = ktmp;
    vT vtmp = val[pivot_index];
    val[pivot_index] = val[pivot_index + (length - 1)];
    val[pivot_index + (length - 1)] = vtmp;

    for (int i = 0; i < length; i++)
    {
        if (key[pivot_index + i] < pivot)
        {
            ktmp = key[pivot_index + i];
            key[pivot_index + i] = key[store];
            key[store] = ktmp;
            vtmp = val[pivot_index + i];
            val[pivot_index + i] = val[store];
            val[store] = vtmp;
            store++;
        }
    }

    // move the pivot into its final position
    ktmp = key[pivot_index + length - 1];
    key[pivot_index + length - 1] = key[store];
    key[store] = ktmp;
    vtmp = val[pivot_index + length - 1];
    val[pivot_index + length - 1] = val[store];
    val[store] = vtmp;

    return store;
}
// quick sort key-value pair (main function)
// Recursive quicksort over parallel key/value arrays: orders by key,
// carrying each key's payload along with it.
template<typename iT, typename vT>
void quick_sort_key_val_pair(iT *key, vT *val, int length)
{
    if (length == 0 || length == 1)
        return;

    // partition around the first element; it lands at index `mid`
    const int mid = partition<iT, vT>(key, val, length, 0);

    quick_sort_key_val_pair<iT, vT>(key, val, mid);  // left of pivot
    quick_sort_key_val_pair<iT, vT>(&key[mid + 1], &val[mid + 1],
                                    length - mid - 1);  // right of pivot
}
/*
template<typename iT>
void move_block(iT* first,
iT* last,
iT* result)
{
//memcpy(result, first, sizeof(iT) * (last - first));
while (first != last)
{
*result = *first;
++result;
++first;
}
}
template<typename iT, typename vT>
void serial_merge(iT* key_left_start,
iT* key_left_end,
iT* key_right_start,
iT* key_right_end,
iT* key_output,
vT* val_left_start,
vT* val_left_end,
vT* val_right_start,
vT* val_right_end,
vT* val_output)
{
while(key_left_start != key_left_end && key_right_start != key_right_end)
{
bool which = *key_right_start < *key_left_start;
//*key_output++ = std::move(which ? *key_right_start++ : *key_left_start++);
*key_output++ = which ? *key_right_start++ : *key_left_start++;
*val_output++ = which ? *val_right_start++ : *val_left_start++;
}
//std::move( key_left_start, key_left_end, key_output );
move_block<iT>(key_left_start, key_left_end, key_output);
move_block<vT>(val_left_start, val_left_end, val_output);
//std::move( key_right_start, key_right_end, key_output );
move_block<iT>(key_right_start, key_right_end, key_output);
move_block<vT>(val_right_start, val_right_end, val_output);
}
// merge sequences [key_left_start,key_left_end) and [key_right_start,key_right_end)
// to output [key_output, key_output+(key_left_end-key_left_start)+(key_right_end-key_right_start))
template<typename iT, typename vT>
void parallel_merge(iT* key_left_start,
iT* key_left_end,
iT* key_right_start,
iT* key_right_end,
iT* key_output,
vT* val_left_start,
vT* val_left_end,
vT* val_right_start,
vT* val_right_end,
vT* val_output)
{
const size_t MERGE_CUT_OFF = 2000;
if( key_left_end - key_left_start + key_right_end - key_right_start <= MERGE_CUT_OFF)
{
serial_merge<iT, vT>(key_left_start, key_left_end, key_right_start, key_right_end, key_output,
val_left_start, val_left_end, val_right_start, val_right_end, val_output);
}
else
{
iT *key_left_middle, *key_right_middle;
vT *val_left_middle, *val_right_middle;
if(key_left_end - key_left_start < key_right_end - key_right_start)
{
key_right_middle = key_right_start + (key_right_end - key_right_start) / 2;
val_right_middle = val_right_start + (val_right_end - val_right_start) / 2;
key_left_middle = std::upper_bound(key_left_start, key_left_end, *key_right_middle);
val_left_middle = val_left_start + (key_left_middle - key_left_start);
}
else
{
key_left_middle = key_left_start + (key_left_end - key_left_start) / 2;
val_left_middle = val_left_start + (val_left_end - val_left_start) / 2;
key_right_middle = std::lower_bound(key_right_start, key_right_end, *key_left_middle);
val_right_middle = val_right_start + (key_right_middle - key_right_start);
}
iT* key_output_middle = key_output + (key_left_middle - key_left_start) + (key_right_middle - key_right_start);
iT* val_output_middle = val_output + (val_left_middle - val_left_start) + (val_right_middle - val_right_start);
#pragma omp task
parallel_merge<iT, vT>(key_left_start, key_left_middle, key_right_start, key_right_middle, key_output,
val_left_start, val_left_middle, val_right_start, val_right_middle, val_output);
parallel_merge<iT, vT>(key_left_middle, key_left_end, key_right_middle, key_right_end, key_output_middle,
val_left_middle, val_left_end, val_right_middle, val_right_end, val_output_middle);
#pragma omp taskwait
}
}
// sorts [key_start,key_end).
// key_temp[0:key_end-key_start) is temporary buffer supplied by caller.
// result is in [key_start,key_end) if inplace==true,
// otherwise in key_temp[0:key_end-key_start).
template<typename iT, typename vT>
void parallel_merge_sort(iT* key_start,
iT* key_end,
iT* key_temp,
vT* val_start,
vT* val_end,
vT* val_temp,
bool inplace)
{
const size_t SORT_CUT_OFF = 500;
if(key_end - key_start <= SORT_CUT_OFF)
{
//std::stable_sort(key_start, key_end);
int list_length = key_end - key_start;
quick_sort_key_val_pair(key_start, val_start, list_length);
if(!inplace)
{
//std::move( key_start, key_end, key_temp );
move_block<iT>(key_start, key_end, key_temp);
move_block<vT>(val_start, val_end, val_temp);
}
}
else
{
iT* key_middle = key_start + (key_end - key_start) / 2;
vT* val_middle = val_start + (val_end - val_start) / 2;
iT* key_temp_middel = key_temp + (key_middle - key_start);
vT* val_temp_middel = val_temp + (val_middle - val_start);
iT* key_temp_end = key_temp + (key_end - key_start);
vT* val_temp_end = val_temp + (val_end - val_start);
#pragma omp task
parallel_merge_sort<iT, vT>(key_start, key_middle, key_temp,
val_start, val_middle, val_temp,
!inplace);
parallel_merge_sort<iT, vT>(key_middle, key_end, key_temp_middel,
val_middle, val_end, val_temp_middel,
!inplace);
#pragma omp taskwait
if(inplace)
parallel_merge<iT, vT>(key_temp, key_temp_middel, key_temp_middel, key_temp_end, key_start,
val_temp, val_temp_middel, val_temp_middel, val_temp_end, val_start);
else
parallel_merge<iT, vT>(key_start, key_middle, key_middle, key_end, key_temp,
val_start, val_middle, val_middle, val_end, val_temp);
}
}
// OpenMP tasks do not run in parallel unless launched inside a thread team.
// This outer wrapper shows how to create the thread team and run the top-level call.
template<typename iT, typename vT>
void do_parallel_merge_sort(iT* key_start,
iT* key_end,
iT* key_temp,
vT* val_start,
vT* val_end,
vT* val_temp,
bool inplace)
{
// Create a thread team.
#pragma omp parallel
// Make only one thread do the top-level call.
// Other threads in team pick up spawned tasks.
#pragma omp single
{
parallel_merge_sort<iT, vT>(key_start, key_end, key_temp,
val_start, val_end, val_temp,
inplace);
}
}
// merge sort key-value pair (main function)
template<typename iT, typename vT>
void omp_merge_sort_key_val_pair(iT *key, vT *val, int length)
{
//quick_sort_key_val_pair<iT, vT>(key, val, length);
if(length == 0 || length == 1)
return;
// allocate temp space for out-of-place merge sort
iT *key_temp = (iT *)malloc(length * sizeof(iT));
vT *val_temp = (vT *)malloc(length * sizeof(vT));
bool inplace = true;
do_parallel_merge_sort<iT, vT>(&key[0], &key[length], key_temp,
&val[0], &val[length], val_temp,
inplace);
// free temp space
free(key_temp);
free(val_temp);
}*/
// in-place exclusive scan
// In-place exclusive prefix sum: input[i] becomes the sum of the original
// input[0 .. i-1]; input[0] becomes 0.
template<typename T>
void exclusive_scan(T *input, int length)
{
    if (length == 0 || length == 1)
        return;

    // `carry` holds the original value of the slot just overwritten, so
    // carry + input[i-1] (the already-scanned prefix) is the next prefix.
    T carry = input[0];
    input[0] = 0;
    for (int i = 1; i < length; i++)
    {
        const T current = input[i];
        input[i] = carry + input[i - 1];
        carry = current;
    }
}
// segmented sum
// Segmented sum: bit_flag marks the first element of each segment; each
// segment head accumulates every following element until the next flag.
// Elements after a head keep their original values (callers read only the
// heads).
// Fix: the bounds check now runs BEFORE the flag read.  The original
// condition `!bit_flag[j] && j < length` dereferenced bit_flag[length]
// (out of bounds) whenever the last segment extended to the end of the
// array.
template<typename vT, typename bT>
void segmented_sum(vT *input, bT *bit_flag, int length)
{
    if(length == 0 || length == 1)
        return;

    for (int i = 0; i < length; i++)
    {
        if (bit_flag[i])
        {
            // fold the rest of this segment into its head
            int j = i + 1;
            while (j < length && !bit_flag[j])
            {
                input[i] += input[j];
                j++;
            }
        }
    }
}
// reduce sum
// Serial sum of the first `length` elements; returns 0 for an empty array.
template<typename T>
T reduce_sum(T *input, int length)
{
    if(length == 0)
        return 0;

    T total = 0;
    for (int i = 0; i < length; i++)
        total += input[i];
    return total;
}
#endif
|
nevpt_contract.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define BLK 48
#define BUFBASE 96
double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb);
double FCI_t2ci_sf(double *ci0, double *t1, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb);
/* Accumulate a partial density-matrix contribution via one BLAS call:
 * in column-major terms, rdm2 (nnorb x nncre, ld nnorb) += tket * tbra^T
 * summed over the bcount beta strings.  Only the first ncre*norb columns
 * are touched -- the caller fills tbra for k <= j only (particle
 * permutation symmetry), hence the "tril" in the name. */
static void tril2pdm_particle_symm(double *rdm2, double *tbra, double *tket,
                                   int bcount, int ncre, int norb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        int nnorb = norb * norb;
        int nncre = norb * ncre;
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nncre, &bcount,
               &D1, tket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb);
}
// (df|ce) E^d_f E^a_e|0> = t_ac
// For each of the bcount beta strings k, contracts the two-electron
// integrals with the double-excitation amplitudes t2ket (layout produced
// by FCI_t2ci_sf) into a norb x norb block gt2[k].  The trailing orbital
// pair of each t2ket[k] slice is transposed into the scratch t2t first so
// the contraction collapses into a single dgemm per string.
void NEVPTkern_dfec_dfae(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        int i, m, n;
        size_t k;
        double *cp0, *cp1;
        double *t2t; // E^d_fE^a_e with ae transposed
#pragma omp parallel private(cp0, cp1, t2t, m, n, i, k)
{
        // per-thread scratch buffer
        // NOTE(review): malloc result is not NULL-checked
        t2t = malloc(sizeof(double) * n4);
#pragma omp for schedule(dynamic, 4)
        for (k = 0; k < bcount; k++) {
                // transpose the last orbital pair:
                // t2t[i][n*norb+m] = t2ket[k][i][m*norb+n]
                for (i = 0; i < nnorb; i++) {
                        cp0 = t2ket + k * n4 + i * nnorb;
                        cp1 = t2t + i * nnorb;
                        for (m = 0; m < norb; m++) {
                                for (n = 0; n < norb; n++) {
                                        cp1[n*norb+m] = cp0[m*norb+n];
                                }
                        }
                }
                // gt2[k] (norb x norb) = eri . t2t^T, column-major
                dgemm_(&TRANS_N, &TRANS_T, &norb, &norb, &n3,
                       &D1, eri, &norb, t2t, &norb,
                       &D0, gt2+nnorb*k, &norb);
        }
        free(t2t);
}
}
// (df|ea) E^e_c E^d_f|0> = t_ac
// Same shape of contraction as NEVPTkern_dfec_dfae, but here the LEADING
// orbital pair of each t2ket[k] slice is transposed (whole nnorb-long rows
// are copied, not element-by-element), and the dgemm contracts over the
// leading n3 dimension instead of the trailing one.
void NEVPTkern_aedf_ecdf(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        int i, m, n;
        size_t k;
        double *cp0, *cp1;
        double *t2t;
#pragma omp parallel private(cp0, cp1, t2t, m, n, i, k)
{
        // per-thread scratch buffer
        // NOTE(review): malloc result is not NULL-checked
        t2t = malloc(sizeof(double) * n4);
#pragma omp for schedule(dynamic, 4)
        for (k = 0; k < bcount; k++) {
                // swap the leading pair: t2t[(n,m), :] = t2ket[k][(m,n), :]
                for (m = 0; m < norb; m++) {
                        for (n = 0; n < norb; n++) {
                                cp0 = t2ket + k * n4 + (m*norb+n) * nnorb;
                                cp1 = t2t + (n*norb+m) * nnorb;
                                for (i = 0; i < nnorb; i++) {
                                        cp1[i] = cp0[i];
                                }
                        }
                }
                // gt2[k] = t2t^T . eri, contracted over the n3 axis
                dgemm_(&TRANS_T, &TRANS_N, &norb, &norb, &n3,
                       &D1, t2t, &n3, eri, &n3,
                       &D0, gt2+nnorb*k, &norb);
        }
        free(t2t);
}
}
// (df|ce) E^a_e E^d_f|0> = t_ac
// No transposition of t2ket is needed for this operator ordering, so the
// contraction is a plain dgemm.  Beta strings are batched 8 at a time to
// give each dgemm a larger N dimension (better BLAS efficiency).
void NEVPTkern_cedf_aedf(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        size_t k;
        int blen;
#pragma omp parallel private(k, blen)
#pragma omp for schedule(dynamic, 1)
        for (k = 0; k < bcount; k+=8) {
                // blen = number of output columns for this batch
                blen = MIN(bcount-k, 8) * norb;
                dgemm_(&TRANS_T, &TRANS_N, &norb, &blen, &n3,
                       &D1, eri, &n3, t2ket+n4*k, &n3,
                       &D0, gt2+nnorb*k, &norb);
        }
}
// (df|ea) E^d_f E^e_c|0> = t_ac
// Direct per-string contraction: gt2[k] = t2ket[k] . eri^T over the
// trailing n3 axis, one dgemm per beta string -- no scratch transpose
// needed for this operator ordering.
void NEVPTkern_dfea_dfec(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        size_t k;
#pragma omp parallel private(k)
#pragma omp for schedule(dynamic, 4)
        for (k = 0; k < bcount; k++) {
                dgemm_(&TRANS_N, &TRANS_T, &norb, &norb, &n3,
                       &D1, t2ket+n4*k, &norb, eri, &norb,
                       &D0, gt2+nnorb*k, &norb);
        }
}
// TODO: NEVPTkern_spin0 stra_id >= strb_id as FCI4pdm_kern_spin0
// One batch of the NEVPT2 density contraction, spin-free case:
//  1. build t1ket = E|ket> and t2ket = EE|ket> for bcount beta strings;
//  2. let contract_kernel fold the 2e integrals into gt2 (one norb x norb
//     block per string -- see the NEVPTkern_* variants above);
//  3. accumulate the rdm3-like intermediate via tril2pdm_particle_symm
//     (only k <= j rows of the bra are built -- particle symmetry);
//  4. accumulate rdm2 += gt2 . t1ket^T in one dgemm.
// NOTE(review): the four malloc results are not NULL-checked.
void NEVPTkern_sf(void (*contract_kernel)(),
                  double *rdm2, double *rdm3, double *eri, double *ci0,
                  int bcount, int stra_id, int strb_id,
                  int norb, int na, int nb, int nlinka, int nlinkb,
                  _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        int i, j, k, l, ij;
        size_t n;
        double *t1ket = malloc(sizeof(double) * nnorb * bcount);
        double *t2ket = malloc(sizeof(double) * n4 * bcount);
        double *gt2 = malloc(sizeof(double) * nnorb * bcount);
        double *tbra, *pbra, *pt2;

        // t2[:,i,j,k,l] = E^i_j E^k_l|ket>
        FCI_t1ci_sf(ci0, t1ket, bcount, stra_id, strb_id,
                    norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
        FCI_t2ci_sf(ci0, t2ket, bcount, stra_id, strb_id,
                    norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
        (*contract_kernel)(gt2, eri, t2ket, bcount, norb, na, nb);

#pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2)
{
        // per-thread bra scratch, nnorb values per beta string
        tbra = malloc(sizeof(double) * nnorb * bcount);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k)
                i = ij / norb;
                j = ij - i * norb;
                // gather the strided t2ket elements for this (i,j) into a
                // contiguous bra block; rows k <= j only (symmetry)
                for (n = 0; n < bcount; n++) {
                        for (k = 0; k <= j; k++) {
                                pbra = tbra + n * nnorb + k*norb;
                                pt2 = t2ket + n * n4 + k*nnorb + ij;
                                for (l = 0; l < norb; l++) {
                                        pbra[l] = pt2[l*n3];
                                }
                        }
                }
                tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, gt2,
                                       bcount, j+1, norb);
        }
        free(tbra);
}
        // reordering of rdm2 is needed: rdm2.transpose(1,0,2,3)
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
               &D1, gt2, &nnorb, t1ket, &nnorb,
               &D1, rdm2, &nnorb);
        free(gt2);
        free(t1ket);
        free(t2ket);
}
// Driver for the NEVPT2 density contraction: compresses the FCI link
// tables, loops over all alpha strings in beta-string batches of BUFBASE,
// accumulates into a temporary pdm2 and into rdm3, then writes rdm2 as
// pdm2 with its first two orbital indices transposed (the transpose(1,0,2,3)
// reordering noted in NEVPTkern_sf).
// NOTE(review): rdm3 is assumed to hold n4 * nnorb doubles (norb^6);
// malloc results are not NULL-checked.
void NEVPTcontract(void (*kernel)(),
                   double *rdm2, double *rdm3, double *eri, double *ci0,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   int *link_indexa, int *link_indexb)
{
        const size_t nnorb = norb * norb;
        const size_t n4 = nnorb * nnorb;
        int i, j, k, ib, strk, bcount;
        double *pdm2 = malloc(sizeof(double) * n4);
        double *cp1, *cp0;

        _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
        _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);
        FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
        FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);

        memset(pdm2, 0, sizeof(double) * n4);
        memset(rdm3, 0, sizeof(double) * n4 * nnorb);

        // accumulate over all (alpha string, beta batch) pairs
        for (strk = 0; strk < na; strk++) {
                for (ib = 0; ib < nb; ib += BUFBASE) {
                        bcount = MIN(BUFBASE, nb-ib);
                        NEVPTkern_sf(kernel, pdm2, rdm3,
                                     eri, ci0, bcount, strk, ib,
                                     norb, na, nb, nlinka, nlinkb, clinka, clinkb);
                }
        }
        free(clinka);
        free(clinkb);

        // rdm2[i,j,:,:] = pdm2[j,i,:,:]  (transpose of leading index pair)
        for (i = 0; i < norb; i++) {
        for (j = 0; j < norb; j++) {
                cp1 = rdm2 + (i*norb+j) * nnorb;
                cp0 = pdm2 + (j*norb+i) * nnorb;
                for (k = 0; k < nnorb; k++) {
                        cp1[k] = cp0[k];
                }
        } }
        free(pdm2);
}
|
nanopore_hdp.c | //
// nanopore_hdp.c
//
//
// Created by Jordan Eizenga on 1/8/16.
//
//
// in 0-based index
#define ALIGNMENT_KMER_COL 9
#define ALIGNMENT_STRAND_COL 4
#define ALIGNMENT_SIGNAL_COL 13
#define NUM_ALIGNMENT_COLS 15
#define MODEL_ROW_HEADER_LENGTH 0
#define MODEL_MEAN_ENTRY 0
#define MODEL_NOISE_ENTRY 1
#define MODEL_ENTRY_LENGTH 5
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "pairwiseAligner.h"
#include "hdp_math_utils.h"
// Bundle an existing HDP with alphabet/kmer metadata into a NanoporeHDP.
// The alphabet is copied and selection-sorted so downstream kmer indexing
// can assume ordered characters; aborts if the alphabet contains
// duplicates.  Takes ownership of `hdp` (destroy_nanopore_hdp frees it).
NanoporeHDP* package_nanopore_hdp(HierarchicalDirichletProcess* hdp, const char* alphabet, int64_t alphabet_size,
                                  int64_t kmer_length) {
    NanoporeHDP* nhdp = (NanoporeHDP*) malloc(sizeof(NanoporeHDP));

    // copy and sort alphabet
    char* internal_alphabet = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    for (int64_t i = 0; i < alphabet_size; i++) {
        internal_alphabet[i] = alphabet[i];
    }

    int64_t min_idx;
    char temp;
    for (int64_t i = 0; i < alphabet_size; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < alphabet_size; j++) {
            if (internal_alphabet[j] < internal_alphabet[min_idx]) {
                min_idx = j;
            }
        }
        temp = internal_alphabet[i];
        internal_alphabet[i] = internal_alphabet[min_idx];
        internal_alphabet[min_idx] = temp;
    }

    // Duplicate check must run on the SORTED copy, where duplicates are
    // adjacent.  The original compared the caller's unsorted `alphabet`,
    // which missed non-adjacent duplicates (e.g. "ACA").
    for (int64_t i = 1; i < alphabet_size; i++) {
        if (internal_alphabet[i - 1] == internal_alphabet[i]) {
            fprintf(stderr, "Characters of alphabet must be distinct.\n");
            exit(EXIT_FAILURE);
        }
    }

    internal_alphabet[alphabet_size] = '\0';

    nhdp->hdp = hdp;
    nhdp->alphabet = internal_alphabet;
    nhdp->alphabet_size = alphabet_size;
    nhdp->kmer_length = kmer_length;

    // note: destroying the HDP housed in the NHDP will destroy the DistributionMetricMemo
    nhdp->distr_metric_memos = stSet_construct2(&free);

    return nhdp;
}
// Free a NanoporeHDP and everything it owns: the underlying HDP (which
// also invalidates any DistributionMetricMemo it housed), the memo set,
// the internal alphabet copy, and the struct itself.
void destroy_nanopore_hdp(NanoporeHDP* nhdp) {
    destroy_hier_dir_proc(nhdp->hdp);
    stSet_destruct(nhdp->distr_metric_memos);
    free(nhdp->alphabet);
    free(nhdp);
}
// Accessor: length of the kmers this NHDP was built for.
int64_t get_nanopore_hdp_kmer_length(NanoporeHDP* nhdp) {
    return nhdp->kmer_length;
}
// Accessor: number of characters in the NHDP's alphabet.
int64_t get_nanopore_hdp_alphabet_size(NanoporeHDP* nhdp) {
    return nhdp->alphabet_size;
}
// Return a freshly malloc'd, NUL-terminated copy of the NHDP's (sorted)
// alphabet.  Caller owns and frees the returned buffer.
char* get_nanopore_hdp_alphabet(NanoporeHDP* nhdp) {
    int64_t alphabet_size = nhdp->alphabet_size;
    char* copy = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    memcpy(copy, nhdp->alphabet, alphabet_size);
    copy[alphabet_size] = '\0';
    return copy;
}
// wrappers
// Thin wrapper: run Gibbs sampling on the NHDP's underlying HDP.
void execute_nhdp_gibbs_sampling(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in,
                                 int64_t thinning, bool verbose) {
    execute_gibbs_sampling(nhdp->hdp, num_samples, burn_in, thinning, verbose);
}
// Thin wrapper: Gibbs sampling with a user callback invoked on snapshots
// of the underlying HDP state.
void execute_nhdp_gibbs_sampling_with_snapshots(NanoporeHDP* nhdp,
                                                int64_t num_samples, int64_t burn_in, int64_t thinning,
                                                void (*snapshot_func)(HierarchicalDirichletProcess*, void*),
                                                void* snapshot_func_args, bool verbose) {
    execute_gibbs_sampling_with_snapshots(nhdp->hdp, num_samples, burn_in, thinning, snapshot_func, snapshot_func_args,
                                          verbose);
}
// Thin wrapper: finalize the posterior predictive distributions of the
// underlying HDP after sampling.
void finalize_nhdp_distributions(NanoporeHDP* nhdp) {
    finalize_distributions(nhdp->hdp);
}
// Fit normal-inverse-gamma prior parameters by maximum likelihood from a
// minION pore model file's per-kmer (level_mean, level_stdv) table.
// model format:
// stateNumber \t alphabetSize \t alphabet \t kmerSize
// [level_mean, level_stdv, noise_mean, noise_stdv, noise_lambda]
// NOTE(review): fopen result is not NULL-checked -- confirm callers
// guarantee the file exists.
void normal_inverse_gamma_params_from_minION(const char* model_filepath, double* mu_out, double* nu_out,
                                             double* alpha_out, double* beta_out) {
    FILE* model_file = fopen(model_filepath, "r");

    // header line
    char* line = stFile_getLineFromFile(model_file);
    stList* tokens = stString_split(line);
    if (stList_length(tokens) != 4) {
        st_errAbort("normal_inverse_gamma_params_from_minION: Model format has changed invalid model"
                    "found here %s\n", model_filepath);
    }
    free(line);
    stList_destruct(tokens);

    // transitions line: validated but otherwise ignored
    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);
    if (stList_length(tokens) != 10) {
        st_errnoAbort("More than 3-state hmm transitions parameters found\n");
    }
    // release the transitions line/tokens before reusing the pointers
    // (the original leaked both here)
    free(line);
    stList_destruct(tokens);

    // model table line
    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);

    int64_t table_length = (stList_length(tokens) - MODEL_ROW_HEADER_LENGTH) / MODEL_ENTRY_LENGTH;

    double* means = (double*) malloc(sizeof(double) * table_length);
    double* precisions = (double*) malloc(sizeof(double) * table_length);
    int64_t mean_offset = MODEL_ROW_HEADER_LENGTH + MODEL_MEAN_ENTRY;   // level_mean column
    int64_t noise_offset = MODEL_ROW_HEADER_LENGTH + MODEL_NOISE_ENTRY; // level_stdv column
    char* mean_str;
    char* noise_str;
    double noise;
    for (int i = 0; i < table_length; i++) {
        mean_str = (char*) stList_get(tokens, mean_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(mean_str, "%lf", &(means[i]));

        noise_str = (char*) stList_get(tokens, noise_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(noise_str, "%lf", &noise);
        precisions[i] = 1.0 / (noise * noise);  // precision = 1 / variance
    }
    free(line);
    stList_destruct(tokens);

    mle_normal_inverse_gamma_params(means, precisions, table_length, mu_out, nu_out, alpha_out, beta_out);

    free(means);
    free(precisions);
    fclose(model_file);
}
// fixed concentration parameters 'gamma' for each depth
// Build an HDP whose normal-inverse-gamma base-distribution parameters
// are fit (MLE) from a minION pore model file.
HierarchicalDirichletProcess* minION_hdp(int64_t num_dps, int64_t depth, double* gamma, double sampling_grid_start,
                                         double sampling_grid_stop, int64_t sampling_grid_length,
                                         const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc(num_dps, depth, gamma, sampling_grid_start, sampling_grid_stop,
                             sampling_grid_length, mu, nu, alpha, beta);
}
// Gamma distribution prior on the concentration parameters 'gamma'
// must designate vector of 'alpha' and 'beta' parameters of distribution for each depth
// Variant of minION_hdp where the per-depth concentration parameters are
// themselves given Gamma(gamma_alpha, gamma_beta) priors instead of being
// fixed.
HierarchicalDirichletProcess* minION_hdp_2(int64_t num_dps, int64_t depth, double* gamma_alpha,
                                           double* gamma_beta, double sampling_grid_start,
                                           double sampling_grid_stop, int64_t sampling_grid_length,
                                           const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, sampling_grid_start,
                               sampling_grid_stop, sampling_grid_length, mu, nu, alpha, beta);
}
// Convenience wrapper: load alignment data with no strand filtering.
void update_nhdp_from_alignment(NanoporeHDP* nhdp, const char* alignment_filepath, bool has_header) {
    update_nhdp_from_alignment_with_filter(nhdp, alignment_filepath, has_header, NULL);
}
// Stream a signal-level alignment TSV into the NHDP's data store.  Each
// kept row contributes one (signal, kmer-dp-id) observation; a row is kept
// when strand_filter is NULL or matches the strand column exactly.  Any
// data previously attached to the HDP is replaced (reset_hdp_data).
// NOTE(review): per-row malloc/sscanf results are not checked; rows
// shorter than the expected column indices would crash in stList_get.
void update_nhdp_from_alignment_with_filter(NanoporeHDP* nhdp, const char* alignment_filepath,
                                            bool has_header, const char* strand_filter) {
    stList* signal_list = stList_construct3(0, &free);
    stList* dp_id_list = stList_construct3(0, &free);

    FILE* align_file = fopen(alignment_filepath, "r");
    if (align_file == NULL) {
        fprintf(stderr, "Alignment %s file does not exist.\n", alignment_filepath);
        exit(EXIT_FAILURE);
    }

    stList* tokens;
    int64_t line_length;
    char* kmer;
    char* strand;
    char* signal_str;
    int64_t* dp_id_ptr;
    double* signal_ptr;
    bool warned = false;
    int proceed = 0;  // 0 == keep the row; strcmp result when filtering

    char* line = stFile_getLineFromFile(align_file);
    if (has_header) {
        // discard the header row (the original leaked this line)
        free(line);
        line = stFile_getLineFromFile(align_file);
    }
    while (line != NULL) {
        tokens = stString_split(line);
        line_length = stList_length(tokens);
        // warn once if the column count drifts from the design-time format
        if (!warned) {
            if (line_length != NUM_ALIGNMENT_COLS) {
                fprintf(stderr, "Input format has changed from design period, HDP may receive incorrect data.\n");
                warned = true;
            }
        }

        strand = (char*) stList_get(tokens, ALIGNMENT_STRAND_COL);
        if (strand_filter != NULL) {
            proceed = strcmp(strand, strand_filter);
        }
        if (proceed == 0) {
            signal_str = (char*) stList_get(tokens, ALIGNMENT_SIGNAL_COL);
            kmer = (char*) stList_get(tokens, ALIGNMENT_KMER_COL);
            signal_ptr = (double*) malloc(sizeof(double));
            dp_id_ptr = (int64_t*) malloc(sizeof(int64_t));
            sscanf(signal_str, "%lf", signal_ptr);
            *dp_id_ptr = kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
            stList_append(signal_list, signal_ptr);
            stList_append(dp_id_list, dp_id_ptr);
        }

        stList_destruct(tokens);
        free(line);
        line = stFile_getLineFromFile(align_file);
    }
    fclose(align_file);

    int64_t data_length;
    double* signal = stList_toDoublePtr(signal_list, &data_length);
    int64_t* dp_ids = stList_toIntPtr(dp_id_list, &data_length);
    stList_destruct(signal_list);
    stList_destruct(dp_id_list);

    reset_hdp_data(nhdp->hdp);
    pass_data_to_hdp(nhdp->hdp, signal, dp_ids, data_length);
}
// Integer exponentiation n^k by repeated multiplication.
// k <= 0 yields 1; no overflow guard.
int64_t power(int64_t n, int64_t k) {
    int64_t result = 1;
    while (k-- > 0) {
        result *= n;
    }
    return result;
}
// Number of multisets of size k drawn from n symbols: C(n + k - 1, k).
// Computed as a rising factorial divided by k!; the intermediate product can
// overflow for large n/k.
int64_t multiset_number(int64_t n, int64_t k) {
    int64_t result = 1;
    // numerator: n * (n + 1) * ... * (n + k - 1)
    for (int64_t m = n; m <= n + k - 1; m++) {
        result *= m;
    }
    // divide out k!; each partial division is exact
    for (int64_t d = 2; d <= k; d++) {
        result /= d;
    }
    return result;
}
// Decodes word_id as a base-alphabet_size number into its digit array,
// most-significant digit first. Caller frees the returned array of length
// word_length.
int64_t* get_word(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* digits = (int64_t*) malloc(sizeof(int64_t) * word_length);
    int64_t remaining = word_id;
    for (int64_t pos = word_length; pos > 0; pos--) {
        digits[pos - 1] = remaining % alphabet_size;
        remaining /= alphabet_size;
    }
    return digits;
}
// Digits of word_id sorted ascending — the canonical representation of the
// word's character multiset. Caller frees the returned array.
int64_t* get_word_multiset(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* multiset = get_word(word_id, alphabet_size, word_length);
    // in-place selection sort, ascending
    for (int64_t i = 0; i < word_length; i++) {
        int64_t smallest = i;
        for (int64_t j = i + 1; j < word_length; j++) {
            if (multiset[j] < multiset[smallest]) {
                smallest = j;
            }
        }
        int64_t swap = multiset[i];
        multiset[i] = multiset[smallest];
        multiset[smallest] = swap;
    }
    return multiset;
}
// Recursive helper for multiset_id: ranks the multiset `tail` among all
// multisets drawn from {alphabet_min, ..., alphabet_size - 1}, counting how
// many multisets precede it by skipping past the blocks of multisets that
// begin with a smaller first element. Aborts if an element lies outside the
// alphabet. NOTE(review): assumes `tail` is sorted ascending and
// tail_length >= 1 — callers build it via get_word_multiset, which sorts.
int64_t multiset_id_internal(int64_t* tail, int64_t tail_length, int64_t alphabet_min, int64_t alphabet_size) {
    int64_t head = tail[0];
    if (tail_length == 1) {
        // base case: position of the single element within the remaining range
        return head - alphabet_min;
    }
    int64_t step = 0;
    for (int64_t i = alphabet_min; i < alphabet_size; i++) {
        if (head > i) {
            // skip every multiset whose first element is i
            step += multiset_number(alphabet_size - i, tail_length - 1);
        }
        else {
            // recurse on the remainder with i as the new minimum element
            return step + multiset_id_internal(&(tail[1]), tail_length - 1, i, alphabet_size);
        }
    }
    fprintf(stderr, "Character outside alphabet included in multiset\n");
    exit(EXIT_FAILURE);
}
// Rank of a sorted multiset over the full alphabet {0, ..., alphabet_size - 1}.
int64_t multiset_id(int64_t* multiset, int64_t length, int64_t alphabet_size) {
    // delegate with the alphabet minimum anchored at 0
    return multiset_id_internal(multiset, length, 0, alphabet_size);
}
// Maps a word id to the id of its character multiset (order-insensitive class).
int64_t word_id_to_multiset_id(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* sorted_word = get_word_multiset(word_id, alphabet_size, word_length);
    int64_t result = multiset_id(sorted_word, word_length, alphabet_size);
    free(sorted_word);
    return result;
}
// Encodes a digit array (most-significant first) as an integer in base
// alphabet_size, via Horner's scheme.
int64_t word_id(int64_t* word, int64_t alphabet_size, int64_t word_length) {
    int64_t id = 0;
    for (int64_t i = 0; i < word_length; i++) {
        id = id * alphabet_size + word[i];
    }
    return id;
}
// Maps each character of kmer to its index in alphabet; exits with an error
// if a character is not found. Caller frees the returned array of length
// kmer_length.
int64_t* kmer_to_word(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = (int64_t*) malloc(sizeof(int64_t) * kmer_length);
    for (int64_t i = 0; i < kmer_length; i++) {
        int64_t idx = -1;
        for (int64_t j = 0; j < alphabet_size; j++) {
            if (kmer[i] == alphabet[j]) {
                idx = j;
                break;
            }
        }
        if (idx < 0) {
            fprintf(stderr, "[signalAlign] - ERROR: K-mer contains character outside alphabet. "
                            "Got offending kmer is: %s. alphabet is %s kmer length %"PRId64"\n",
                    kmer, alphabet, kmer_length);
            exit(EXIT_FAILURE);
        }
        word[i] = idx;
    }
    return word;
}
// Integer id of a k-mer string: base-alphabet_size encoding of its character
// indices. Exits (via kmer_to_word) on characters outside the alphabet.
int64_t kmer_id(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* encoded = kmer_to_word(kmer, alphabet, alphabet_size, kmer_length);
    int64_t id = word_id(encoded, alphabet_size, kmer_length);
    free(encoded);
    return id;
}
// kmer_id over the canonical 4-letter DNA alphabet.
int64_t standard_kmer_id(char* kmer, int64_t kmer_length) {
    static char dna_alphabet[] = "ACGT";
    return kmer_id(kmer, dna_alphabet, 4, kmer_length);
}
// Leaf Dirichlet-process id of a k-mer under this NanoporeHDP's own alphabet
// and k-mer length.
int64_t nhdp_kmer_id(NanoporeHDP* nhdp, char* kmer) {
    char* alphabet = nhdp->alphabet;
    return kmer_id(kmer, alphabet, nhdp->alphabet_size, nhdp->kmer_length);
}
// Density of the signal value pointed to by x under the k-mer's Dirichlet
// process; LOG_ZERO when kmer is NULL.
double get_nanopore_kmer_density(NanoporeHDP* nhdp, void *kmer, void *x) {
    if (kmer == NULL) {
        return LOG_ZERO;
    }
    double signal = *(double *) x;
    return dir_proc_density(nhdp->hdp, signal, nhdp_kmer_id(nhdp, (char *) kmer));
}
// Distance (per the memo's metric) between the distributions of two k-mers
// from the same NanoporeHDP.
double get_kmer_distr_distance(NanoporeDistributionMetricMemo* memo, char* kmer_1, char* kmer_2) {
    NanoporeHDP* nhdp = memo->nhdp;
    int64_t id_1 = nhdp_kmer_id(nhdp, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp, kmer_2);
    return get_dir_proc_distance(memo->memo, id_1, id_2);
}
// Bundles an HDP distance memo with its NanoporeHDP; caller owns the result.
NanoporeDistributionMetricMemo* package_nanopore_metric_memo(NanoporeHDP* nhdp, DistributionMetricMemo* memo) {
    NanoporeDistributionMetricMemo* wrapped =
            (NanoporeDistributionMetricMemo*) malloc(sizeof(NanoporeDistributionMetricMemo));
    wrapped->nhdp = nhdp;
    wrapped->memo = memo;
    return wrapped;
}
// Memoized KL-divergence metric over this NanoporeHDP's distributions.
NanoporeDistributionMetricMemo* new_nhdp_kl_divergence_memo(NanoporeHDP* nhdp) {
    DistributionMetricMemo* metric = new_kl_divergence_memo(nhdp->hdp);
    return package_nanopore_metric_memo(nhdp, metric);
}
// Memoized Hellinger-distance metric over this NanoporeHDP's distributions.
NanoporeDistributionMetricMemo* new_nhdp_hellinger_distance_memo(NanoporeHDP* nhdp) {
    DistributionMetricMemo* metric = new_hellinger_distance_memo(nhdp->hdp);
    return package_nanopore_metric_memo(nhdp, metric);
}
// Memoized L2-distance metric over this NanoporeHDP's distributions.
NanoporeDistributionMetricMemo* new_nhdp_l2_distance_memo(NanoporeHDP* nhdp) {
    DistributionMetricMemo* metric = new_l2_distance_memo(nhdp->hdp);
    return package_nanopore_metric_memo(nhdp, metric);
}
// Memoized Shannon-Jensen-distance metric over this NanoporeHDP's distributions.
NanoporeDistributionMetricMemo* new_nhdp_shannon_jensen_distance_memo(NanoporeHDP* nhdp) {
    DistributionMetricMemo* metric = new_shannon_jensen_distance_memo(nhdp->hdp);
    return package_nanopore_metric_memo(nhdp, metric);
}
// KL divergence between kmer_1's distribution in nhdp_1 and kmer_2's in nhdp_2.
double compare_nhdp_distrs_kl_divergence(NanoporeHDP* nhdp_1, char* kmer_1,
                                         NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_kl_divergence(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
// L2 distance between kmer_1's distribution in nhdp_1 and kmer_2's in nhdp_2.
double compare_nhdp_distrs_l2_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                       NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_l2_distance(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
// Shannon-Jensen distance between kmer_1's distribution in nhdp_1 and
// kmer_2's in nhdp_2.
double compare_nhdp_distrs_shannon_jensen_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                                   NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_shannon_jensen_distance(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
// Hellinger distance between kmer_1's distribution in nhdp_1 and kmer_2's in
// nhdp_2.
double compare_nhdp_distrs_hellinger_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                              NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_hellinger_distance(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
// Expected value of the k-mer's Dirichlet-process distribution.
double kmer_distr_expected_val(NanoporeHDP* nhdp, char* kmer) {
    int64_t dp_id = nhdp_kmer_id(nhdp, kmer);
    return dir_proc_expected_val(nhdp->hdp, dp_id);
}
// Variance of the k-mer's Dirichlet-process distribution.
double kmer_distr_variance(NanoporeHDP* nhdp, char* kmer) {
    int64_t dp_id = nhdp_kmer_id(nhdp, kmer);
    return dir_proc_variance(nhdp->hdp, dp_id);
}
// DP count for the flat topology: one leaf per k-mer plus the single base DP.
int64_t flat_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    return power(alphabet_size, kmer_length) + 1;
}
// Attaches every k-mer leaf DP directly to the base DP (whose id equals the
// number of leaves).
void flat_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t base_dp_id = power(alphabet_size, kmer_length);
    for (int64_t leaf = 0; leaf < base_dp_id; leaf++) {
        set_dir_proc_parent(hdp, leaf, base_dp_id);
    }
}
// Two-level (k-mer leaves -> base) NanoporeHDP with fixed concentration
// parameters; model parameters are read from model_filepath by minION_hdp.
NanoporeHDP* flat_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                            double base_gamma, double leaf_gamma,
                            double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                            const char* model_filepath) {
    double* gammas = (double*) malloc(sizeof(double) * 2);
    gammas[0] = base_gamma;
    gammas[1] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(flat_hdp_num_dps(alphabet_size, kmer_length), 2, gammas,
                                                   sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    flat_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// Two-level NanoporeHDP with per-level alpha/beta hyperparameters instead of
// fixed gammas (cf. flat_hdp_model).
NanoporeHDP* flat_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                              double base_gamma_alpha, double base_gamma_beta,
                              double leaf_gamma_alpha, double leaf_gamma_beta,
                              double sampling_grid_start, double sampling_grid_stop,
                              int64_t sampling_grid_length, const char* model_filepath) {
    double* alphas = (double*) malloc(sizeof(double) * 2);
    double* betas = (double*) malloc(sizeof(double) * 2);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = leaf_gamma_alpha;
    betas[1] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(flat_hdp_num_dps(alphabet_size, kmer_length), 2,
                                                     alphas, betas, sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);
    flat_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// DP count for the multiset topology: one leaf per k-mer, one middle DP per
// character multiset, and the base DP.
int64_t multiset_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    int64_t leaves = power(alphabet_size, kmer_length);
    int64_t middles = multiset_number(alphabet_size, kmer_length);
    return leaves + middles + 1;
}
// Wires the three-level topology: k-mer leaves -> multiset DPs -> base DP.
void multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + multiset_number(alphabet_size, kmer_length);
    // each leaf points at the multiset DP for its sorted character content
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        int64_t parent = word_id_to_multiset_id(leaf, alphabet_size, kmer_length);
        set_dir_proc_parent(hdp, leaf, num_leaves + parent);
    }
    // all multiset DPs share the base DP
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
// Three-level multiset NanoporeHDP with fixed concentration parameters.
NanoporeHDP* multiset_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                double base_gamma, double middle_gamma, double leaf_gamma,
                                double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                const char* model_filepath) {
    double* gammas = (double*) malloc(sizeof(double) * 3);
    gammas[0] = base_gamma;
    gammas[1] = middle_gamma;
    gammas[2] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(multiset_hdp_num_dps(alphabet_size, kmer_length), 3, gammas,
                                                   sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    multiset_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// Three-level multiset NanoporeHDP with per-level alpha/beta hyperparameters
// instead of fixed gammas.
NanoporeHDP* multiset_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                  double base_gamma_alpha, double base_gamma_beta,
                                  double middle_gamma_alpha, double middle_gamma_beta,
                                  double leaf_gamma_alpha, double leaf_gamma_beta,
                                  double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                  const char* model_filepath) {
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(multiset_hdp_num_dps(alphabet_size, kmer_length), 3,
                                                     alphas, betas, sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);
    multiset_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// DP count for the middle-2-nucleotides topology; requires kmer_length > 2.
int64_t middle_2_nts_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    if (kmer_length <= 2) {
        fprintf(stderr, "k-mer is not long enough for middle 2 nucleotides HDP\n");
        exit(EXIT_FAILURE);
    }
    int64_t leaves = power(alphabet_size, kmer_length);
    int64_t middles = power(alphabet_size, 2);
    return leaves + middles + 1;
}
// Encodes the two central characters of the k-mer as a single
// base-alphabet_size pair id.
int64_t kmer_id_to_middle_nts_id(int64_t kmer_id, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = get_word(kmer_id, alphabet_size, kmer_length);
    int64_t left = word[kmer_length / 2 - 1];
    int64_t right = word[kmer_length / 2];
    free(word);
    return alphabet_size * left + right;
}
// Wires the topology: each k-mer leaf -> DP of its middle two characters ->
// base DP.
void middle_2_nts_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + power(alphabet_size, 2);
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        int64_t middle = kmer_id_to_middle_nts_id(leaf, alphabet_size, kmer_length);
        set_dir_proc_parent(hdp, leaf, num_leaves + middle);
    }
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
// Three-level middle-2-nucleotides NanoporeHDP with fixed concentration
// parameters. Warns for odd k-mer lengths (center is ambiguous).
NanoporeHDP* middle_2_nts_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                    double base_gamma, double middle_gamma, double leaf_gamma,
                                    double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                    const char* model_filepath) {
    if (kmer_length % 2 != 0) {
        fprintf(stderr, "Warning: middle two nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n");
    }
    double* gammas = (double*) malloc(sizeof(double) * 3);
    gammas[0] = base_gamma;
    gammas[1] = middle_gamma;
    gammas[2] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(middle_2_nts_hdp_num_dps(alphabet_size, kmer_length), 3, gammas,
                                                   sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// Maps a word id to the id of the multiset of its characters' group labels.
// char_groups maps each alphabet index to a group in [0, num_groups).
int64_t word_id_to_group_multiset_id(int64_t word_id, int64_t* char_groups, int64_t alphabet_size,
                                     int64_t word_length, int64_t num_groups) {
    int64_t* groups = get_word(word_id, alphabet_size, word_length);
    // substitute each character index with its group label
    for (int64_t i = 0; i < word_length; i++) {
        groups[i] = char_groups[groups[i]];
    }
    // insertion sort ascending -> canonical multiset order
    for (int64_t i = 1; i < word_length; i++) {
        int64_t key = groups[i];
        int64_t j = i - 1;
        while (j >= 0 && groups[j] > key) {
            groups[j + 1] = groups[j];
            j--;
        }
        groups[j + 1] = key;
    }
    int64_t id = multiset_id(groups, word_length, num_groups);
    free(groups);
    return id;
}
// DP count for the group-multiset topology: leaves + one middle DP per group
// multiset + base DP. Group count is 1 + the largest label in char_groups.
int64_t group_multiset_hdp_num_dps(int64_t alphabet_size, int64_t* char_groups, int64_t kmer_length) {
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] >= num_groups) {
            num_groups = char_groups[i] + 1;
        }
    }
    return power(alphabet_size, kmer_length) + multiset_number(num_groups, kmer_length) + 1;
}
// Wires the topology: k-mer leaves -> group-multiset DPs -> base DP.
// char_groups maps each alphabet index to its (0-based, consecutive) group.
void group_multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t* char_groups,
                                       int64_t alphabet_size, int64_t kmer_length) {
    // group count = 1 + largest group label
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] >= num_groups) {
            num_groups = char_groups[i] + 1;
        }
    }
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + multiset_number(num_groups, kmer_length);
    // each leaf points at the DP for its multiset of group labels
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        int64_t parent = word_id_to_group_multiset_id(leaf, char_groups, alphabet_size, kmer_length, num_groups);
        set_dir_proc_parent(hdp, leaf, num_leaves + parent);
    }
    // all middle DPs share the base DP
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
// Validates that group labels are non-negative and consecutively numbered
// starting at 0; exits with an error message otherwise.
void confirm_valid_groupings(int64_t* char_groups, int64_t alphabet_size) {
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] < 0) {
            fprintf(stderr, "Group numbers must be non-negative.\n");
            exit(EXIT_FAILURE);
        }
        if (char_groups[i] >= num_groups) {
            num_groups = char_groups[i] + 1;
        }
    }
    // every label in [0, num_groups) must occur at least once
    for (int64_t g = 0; g < num_groups; g++) {
        bool present = false;
        for (int64_t j = 0; j < alphabet_size; j++) {
            if (char_groups[j] == g) {
                present = true;
                break;
            }
        }
        if (!present) {
            fprintf(stderr, "Groups must be consecutively numbered starting with 0.\n");
            exit(EXIT_FAILURE);
        }
    }
}
// Returns char_groups reordered to correspond to the alphabet sorted into
// ascending character order. Inputs are not modified; caller frees the result.
int64_t* alphabet_sort_groups(const char* alphabet, int64_t* char_groups, int64_t alphabet_size) {
    char* chars = (char*) malloc(sizeof(char) * alphabet_size);
    int64_t* groups = (int64_t*) malloc(sizeof(int64_t) * alphabet_size);
    for (int64_t i = 0; i < alphabet_size; i++) {
        chars[i] = alphabet[i];
        groups[i] = char_groups[i];
    }
    // selection sort on the characters, swapping group labels in lockstep
    for (int64_t i = 0; i < alphabet_size; i++) {
        int64_t smallest = i;
        for (int64_t j = i + 1; j < alphabet_size; j++) {
            if (chars[j] < chars[smallest]) {
                smallest = j;
            }
        }
        if (smallest != i) {
            char tmp_char = chars[i];
            chars[i] = chars[smallest];
            chars[smallest] = tmp_char;
            int64_t tmp_group = groups[i];
            groups[i] = groups[smallest];
            groups[smallest] = tmp_group;
        }
    }
    free(chars);
    return groups;
}
// Three-level group-multiset NanoporeHDP with fixed concentration parameters.
// Assumes char_groups are 0-based and consecutively numbered (validated).
NanoporeHDP* group_multiset_hdp_model(const char* alphabet, int64_t* char_groups, int64_t alphabet_size, int64_t kmer_length,
                                      double base_gamma, double middle_gamma, double leaf_gamma,
                                      double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                      const char* model_filepath) {
    confirm_valid_groupings(char_groups, alphabet_size);
    double* gammas = (double*) malloc(sizeof(double) * 3);
    gammas[0] = base_gamma;
    gammas[1] = middle_gamma;
    gammas[2] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length),
                                                   3, gammas, sampling_grid_start, sampling_grid_stop,
                                                   sampling_grid_length, model_filepath);
    // reorder group labels to match the internally-ordered (sorted) alphabet
    int64_t* sorted_char_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size);
    group_multiset_hdp_model_internal(hdp, sorted_char_groups, alphabet_size, kmer_length);
    free(sorted_char_groups);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// Three-level group-multiset NanoporeHDP with per-level alpha/beta
// hyperparameters. Assumes char_groups are 0-based and consecutively
// numbered (validated).
NanoporeHDP* group_multiset_hdp_model_2(const char* alphabet, int64_t* char_groups, int64_t alphabet_size, int64_t kmer_length,
                                        double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha,
                                        double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta,
                                        double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                        const char* model_filepath) {
    confirm_valid_groupings(char_groups, alphabet_size);
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp =
            minION_hdp_2(group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length), 3,
                         alphas, betas, sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                         model_filepath);
    // reorder group labels to match the internally-ordered (sorted) alphabet
    int64_t* sorted_char_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size);
    group_multiset_hdp_model_internal(hdp, sorted_char_groups, alphabet_size, kmer_length);
    free(sorted_char_groups);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// Three-level middle-2-nucleotides NanoporeHDP with per-level alpha/beta
// hyperparameters. Warns for odd k-mer lengths (center is ambiguous).
NanoporeHDP* middle_2_nts_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                      double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha,
                                      double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta,
                                      double sampling_grid_start, double sampling_grid_stop,
                                      int64_t sampling_grid_length, const char* model_filepath) {
    if (kmer_length % 2 != 0) {
        fprintf(stderr, "Warning: middle 2 nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n");
    }
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(middle_2_nts_hdp_num_dps(alphabet_size, kmer_length), 3,
                                                     alphas, betas, sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);
    middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
// DP count for the purine-composition topology: one leaf per k-mer over the
// combined alphabet, one middle DP per purine count (0..kmer_length), plus
// the base DP.
int64_t purine_composition_hdp_num_dps(int64_t num_purines, int64_t num_pyrimidines, int64_t kmer_length) {
    int64_t alphabet_size = num_purines + num_pyrimidines;
    return power(alphabet_size, kmer_length) + (kmer_length + 1) + 1;
}
// Wires the topology: k-mer leaves -> purine-count DPs (0..kmer_length
// purines) -> base DP. purine_alphabet[c] is true when alphabet index c is a
// purine.
void purine_composition_hdp_model_internal(HierarchicalDirichletProcess* hdp, bool* purine_alphabet,
                                           int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + kmer_length + 1;
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        // count purines in this k-mer's character indices
        int64_t* word = get_word(leaf, alphabet_size, kmer_length);
        int64_t purine_count = 0;
        for (int64_t i = 0; i < kmer_length; i++) {
            if (purine_alphabet[word[i]]) {
                purine_count++;
            }
        }
        free(word);
        set_dir_proc_parent(hdp, leaf, num_leaves + purine_count);
    }
    // all purine-count DPs share the base DP
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
// Builds a three-level HDP: k-mer leaves -> purine-count DPs -> base DP.
// The alphabet is the concatenation of the purine and pyrimidine characters;
// package_nanopore_hdp uses an internal ordering, so the purine membership
// mask is rebuilt against the re-fetched alphabet before wiring the topology.
NanoporeHDP* purine_composition_hdp_model(char* purine_alphabet, int64_t num_purines,
                                          char* pyrimidine_alphabet, int64_t num_pyrimidines,
                                          int64_t kmer_length, double base_gamma, double middle_gamma,
                                          double leaf_gamma, double sampling_grid_start, double sampling_grid_stop,
                                          int64_t sampling_grid_length, const char* model_filepath) {
    double* gamma_params = (double*) malloc(sizeof(double) * 3);
    gamma_params[0] = base_gamma;
    gamma_params[1] = middle_gamma;
    gamma_params[2] = leaf_gamma;
    int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length);
    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start,
                                                   sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    int64_t alphabet_size = num_purines + num_pyrimidines;
    char* alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    for (int64_t i = 0; i < num_purines; i++) {
        alphabet[i] = purine_alphabet[i];
    }
    for (int64_t i = 0; i < num_pyrimidines; i++) {
        alphabet[i + num_purines] = pyrimidine_alphabet[i];
    }
    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
    // get back the alphabet in the internal ordering
    free(alphabet);
    alphabet = get_nanopore_hdp_alphabet(nhdp);
    bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size);
    // BUG FIX: the mask must be built for every character in the alphabet,
    // not just the first num_purines entries; the old bound (i < num_purines)
    // left the tail of purines[] uninitialized. The _2 variant of this
    // function already iterates over the whole alphabet.
    for (int64_t i = 0; i < alphabet_size; i++) {
        purines[i] = false;
        for (int64_t j = 0; j < num_purines; j++) {
            if (alphabet[i] == purine_alphabet[j]) {
                purines[i] = true;
                break;
            }
        }
    }
    free(alphabet);
    purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length);
    free(purines);
    finalize_hdp_structure(hdp);
    return nhdp;
}
// Three-level purine-composition NanoporeHDP with per-level alpha/beta
// hyperparameters. The alphabet is the concatenation of purines then
// pyrimidines; the purine mask is rebuilt against the internal alphabet
// ordering before wiring the topology.
NanoporeHDP* purine_composition_hdp_model_2(char* purine_alphabet, int64_t num_purines,
                                            char* pyrimidine_alphabet, int64_t num_pyrimidines,
                                            int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta,
                                            double middle_gamma_alpha, double middle_gamma_beta,
                                            double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start,
                                            double sampling_grid_stop, int64_t sampling_grid_length,
                                            const char* model_filepath) {
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp =
            minION_hdp_2(purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length), 3,
                         alphas, betas, sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                         model_filepath);
    // combined alphabet: purines first, then pyrimidines
    int64_t alphabet_size = num_purines + num_pyrimidines;
    char* letters = (char*) malloc(sizeof(char) * alphabet_size);
    for (int64_t i = 0; i < num_purines; i++) {
        letters[i] = purine_alphabet[i];
    }
    for (int64_t i = 0; i < num_pyrimidines; i++) {
        letters[num_purines + i] = pyrimidine_alphabet[i];
    }
    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, letters, alphabet_size, kmer_length);
    // re-fetch the alphabet in the internal ordering before building the mask
    free(letters);
    letters = get_nanopore_hdp_alphabet(nhdp);
    bool* is_purine = (bool*) malloc(sizeof(bool) * alphabet_size);
    for (int64_t i = 0; i < alphabet_size; i++) {
        is_purine[i] = false;
        for (int64_t j = 0; j < num_purines; j++) {
            if (letters[i] == purine_alphabet[j]) {
                is_purine[i] = true;
                break;
            }
        }
    }
    free(letters);
    purine_composition_hdp_model_internal(hdp, is_purine, alphabet_size, kmer_length);
    free(is_purine);
    finalize_hdp_structure(hdp);
    return nhdp;
}
// Writes a NanoporeHDP to filepath: alphabet size, alphabet string, k-mer
// length, then the serialized HDP (see deserialize_nhdp for the inverse).
// Fix: abort with a message if the file cannot be opened for writing instead
// of passing a NULL FILE* to fprintf (undefined behavior).
void serialize_nhdp(NanoporeHDP* nhdp, const char* filepath) {
    FILE* out = fopen(filepath, "w");
    if (out == NULL) {
        fprintf(stderr, "Could not open %s to serialize NanoporeHDP.\n", filepath);
        exit(EXIT_FAILURE);
    }
    fprintf(out, "%"PRId64"\n", nhdp->alphabet_size);
    fprintf(out, "%s\n", nhdp->alphabet);
    fprintf(out, "%"PRId64"\n", nhdp->kmer_length);
    serialize_hdp(nhdp->hdp, out);
    fclose(out);
}
// Reads a NanoporeHDP previously written by serialize_nhdp.
// Fixes: (1) aborts with an error if the file cannot be opened instead of
// crashing on a NULL FILE*; (2) allocates room for the terminating NUL that
// sscanf("%s") writes — the old buffer of exactly alphabet_size bytes was
// overflowed by one byte.
NanoporeHDP* deserialize_nhdp(const char* filepath) {
    FILE* in = fopen(filepath, "r");
    if (in == NULL) {
        fprintf(stderr, "Could not open %s to deserialize NanoporeHDP.\n", filepath);
        exit(EXIT_FAILURE);
    }
    char* line = stFile_getLineFromFile(in);
    int64_t alphabet_size;
    sscanf(line, "%"SCNd64, &alphabet_size);
    free(line);
    line = stFile_getLineFromFile(in);
    // +1 for the NUL terminator appended by sscanf's %s conversion
    char* alphabet = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    sscanf(line, "%s", alphabet);
    free(line);
    line = stFile_getLineFromFile(in);
    int64_t kmer_length;
    sscanf(line, "%"SCNd64, &kmer_length);
    free(line);
    HierarchicalDirichletProcess* hdp = deserialize_hdp(in);
    fclose(in);
    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
    free(alphabet);
    return nhdp;
}
// Aborts unless alpha/beta hyperparameters were supplied for all three levels
// (base, middle, leaf) of the prior.
static void nanoporeHdp_checkThreeLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta,
                                                       double middleGammaAlpha, double middleGammaBeta,
                                                       double leafGammaAlpha, double leafGammaBeta) {
    bool missing = (baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) ||
                   (middleGammaAlpha == NULL_HYPERPARAMETER) || (middleGammaBeta == NULL_HYPERPARAMETER) ||
                   (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER);
    if (missing) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide a alphas and betas for the base, middle, "
                    "and the leaf distributions for the prior for this NanoporeHdp");
    }
}
// Aborts unless all three fixed concentration parameters were supplied.
static void nanoporeHdp_checkThreeLevelFixedParameters(double baseGamma, double middleGamma, double leafGamma) {
    bool missing = (baseGamma == NULL_HYPERPARAMETER) || (middleGamma == NULL_HYPERPARAMETER) ||
                   (leafGamma == NULL_HYPERPARAMETER);
    if (missing) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma, middle gamma, and leaf gamma "
                    "for this NanoporeHdpType\n");
    }
}
// Aborts unless alpha/beta hyperparameters were supplied for both levels
// (base, leaf) of a two-level prior.
// Fix: the concatenated error message previously read "...the
// leafdistributions..." — a space was missing between the string pieces.
static void nanoporeHdp_checkTwoLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta,
                                                     double leafGammaAlpha, double leafGammaBeta) {
    if ((baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) ||
        (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER)) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide a alphas and betas for the base and the leaf "
                    "distributions for the prior for this NanoporeHdp");
    }
}
// Dispatches on NanoporeHdpType and constructs the matching fresh NanoporeHDP
// topology (flat / multiset / purine-composition / middle-2-nts /
// group-multiset), in either fixed-gamma or alpha-beta-prior form, over the
// alphabet selected by the type (METHYL_* / SYMBOL_NUMBER_* constants are
// project-defined). Validates that the hyperparameters required by the chosen
// type were provided, and exits with an error for unrecognized types.
// NOTE(review): the trailing `else` only pairs with the final `if`, so
// recognized types return from their own branch first.
static NanoporeHDP *loadNanoporeHdpFromScratch(NanoporeHdpType nHdpType, const char *modelFile, int64_t kmerLength,
                                               double baseGamma, double middleGamma, double leafGamma,
                                               double baseGammaAlpha, double baseGammaBeta,
                                               double middleGammaAlpha, double middleGammaBeta,
                                               double leafGammaAlpha, double leafGammaBeta,
                                               double samplingGridStart, double samplingGridEnd,
                                               int64_t samplingGridLength) {
    // flat topology, fixed gammas
    if (nHdpType == singleLevelFixed) {
        if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) {
            st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma "
                        "for this NanoporeHdpType\n");
        }
        NanoporeHDP *nHdp = flat_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                           baseGamma, leafGamma,
                                           samplingGridStart, samplingGridEnd, samplingGridLength, modelFile);
        return nHdp;
    }
    // flat topology, alpha/beta priors
    if (nHdpType == singleLevelPrior) {
        nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                             baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta,
                                             samplingGridStart, samplingGridEnd, samplingGridLength,
                                             modelFile);
        return nHdp;
    }
    // flat topology, alpha/beta priors, methyl-cytosine alphabet
    if (nHdpType == singleLevelPrior2) {
        nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength,
                                             baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta,
                                             samplingGridStart, samplingGridEnd, samplingGridLength,
                                             modelFile);
        return nHdp;
    }
    // flat topology, alpha/beta priors, methyl-C/A (E. coli) alphabet
    if (nHdpType == singleLevelPriorEcoli) {
        nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength,
                                             baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta,
                                             samplingGridStart, samplingGridEnd, samplingGridLength,
                                             modelFile);
        return nHdp;
    }
    // multiset topology, fixed gammas
    if (nHdpType == multisetFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        NanoporeHDP *nHdp = multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                               baseGamma, middleGamma, leafGamma,
                                               samplingGridStart, samplingGridEnd, samplingGridLength,
                                               modelFile);
        return nHdp;
    }
    // multiset topology, alpha/beta priors
    if (nHdpType == multisetPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta,
                                                   middleGammaAlpha, middleGammaBeta,
                                                   leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                 baseGammaAlpha, baseGammaBeta,
                                                 middleGammaAlpha, middleGammaBeta,
                                                 leafGammaAlpha, leafGammaBeta,
                                                 samplingGridStart, samplingGridEnd, samplingGridLength,
                                                 modelFile);
        return nHdp;
    }
    // multiset topology, alpha/beta priors, methyl-cytosine alphabet
    if (nHdpType == multisetPrior2) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta,
                                                   middleGammaAlpha, middleGammaBeta,
                                                   leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength,
                                                 baseGammaAlpha, baseGammaBeta,
                                                 middleGammaAlpha, middleGammaBeta,
                                                 leafGammaAlpha, leafGammaBeta,
                                                 samplingGridStart, samplingGridEnd, samplingGridLength,
                                                 modelFile);
        return nHdp;
    }
    // multiset topology, alpha/beta priors, methyl-C/A (E. coli) alphabet
    if (nHdpType == multisetPriorEcoli) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta,
                                                   middleGammaAlpha, middleGammaBeta,
                                                   leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength,
                                                 baseGammaAlpha, baseGammaBeta,
                                                 middleGammaAlpha, middleGammaBeta,
                                                 leafGammaAlpha, leafGammaBeta,
                                                 samplingGridStart, samplingGridEnd, samplingGridLength,
                                                 modelFile);
        return nHdp;
    }
    // purine-composition topology, fixed gammas (2 purines, 4 pyrimidines)
    if (nHdpType == compFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        NanoporeHDP *nHdp = purine_composition_hdp_model(PURINES, 2, PYRIMIDINES, 4, kmerLength,
                                                         baseGamma, middleGamma, leafGamma,
                                                         samplingGridStart, samplingGridEnd,
                                                         samplingGridLength, modelFile);
        return nHdp;
    }
    // purine-composition topology, alpha/beta priors
    if (nHdpType == compPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha,
                                                   middleGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = purine_composition_hdp_model_2(PURINES, 2, PYRIMIDINES, 4, kmerLength,
                                                           baseGammaAlpha, baseGammaBeta,
                                                           middleGammaAlpha, middleGammaBeta,
                                                           leafGammaAlpha, leafGammaBeta,
                                                           samplingGridStart, samplingGridEnd,
                                                           samplingGridLength, modelFile);
        return nHdp;
    }
    // middle-2-nucleotides topology, fixed gammas
    if (nHdpType == middleNtsFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        NanoporeHDP *nHdp = middle_2_nts_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                   baseGamma, middleGamma, leafGamma,
                                                   samplingGridStart, samplingGridEnd, samplingGridLength,
                                                   modelFile);
        return nHdp;
    }
    // middle-2-nucleotides topology, alpha/beta priors
    if (nHdpType == middleNtsPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha,
                                                   middleGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = middle_2_nts_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                     baseGammaAlpha, baseGammaBeta,
                                                     middleGammaAlpha, middleGammaBeta,
                                                     leafGammaAlpha, leafGammaBeta,
                                                     samplingGridStart, samplingGridEnd, samplingGridLength,
                                                     modelFile);
        return nHdp;
    }
    // group-multiset topology, fixed gammas; hard-coded grouping for the
    // 6-letter ACEGOT alphabet
    if (nHdpType == groupMultisetFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        // ACEGOT
        // {0, 1, 1, 2, 1, 3}
        int64_t groups[6] = {0, 1, 1, 2, 1, 3};
        NanoporeHDP *nHdp = group_multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, groups, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                     baseGamma, middleGamma, leafGamma,
                                                     samplingGridStart, samplingGridEnd, samplingGridLength,
                                                     modelFile);
        return nHdp;
    }
    // group-multiset topology, alpha/beta priors; same ACEGOT grouping
    if (nHdpType == groupMultisetPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha,
                                                   middleGammaBeta, leafGammaAlpha, leafGammaBeta);
        // ACEGOT
        // {0, 1, 1, 2, 1, 3}
        int64_t groups[6] = {0, 1, 1, 2, 1, 3};
        NanoporeHDP *nHdp = group_multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, groups,
                                                       SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                       baseGammaAlpha, baseGammaBeta,
                                                       middleGammaAlpha, middleGammaBeta,
                                                       leafGammaAlpha, leafGammaBeta,
                                                       samplingGridStart, samplingGridEnd, samplingGridLength,
                                                       modelFile);
        return nHdp;
    }
    else {
        // no type matched above
        fprintf(stderr, "loadNanoporeHdpFromScratch: - error making HDP from scratch\n");
        exit(EXIT_FAILURE);
    }
}
/*
 * Builds a template and a complement NanoporeHDP from scratch, trains each on
 * the given alignment file, runs Gibbs sampling (nbSamples samples after
 * burnIn, keeping every thinning-th), finalizes the distributions, and
 * serializes the models to templateHDP / complementHDP.  The two strands are
 * processed concurrently in OpenMP parallel sections.
 */
void nanoporeHdp_buildNanoporeHdpFromAlignment(NanoporeHdpType type, int64_t kmerLength,
                                               const char *templateModelFile, const char* complementModelFile,
                                               const char *alignments,
                                               const char *templateHDP, const char *complementHDP,
                                               int64_t nbSamples, int64_t burnIn, int64_t thinning, bool verbose,
                                               double baseGamma, double middleGamma, double leafGamma,
                                               double baseGammaAlpha, double baseGammaBeta,
                                               double middleGammaAlpha, double middleGammaBeta,
                                               double leafGammaAlpha, double leafGammaBeta,
                                               double samplingGridStart, double samplingGridEnd,
                                               int64_t samplingGridLength) {
    fprintf(stderr, "Building Nanopore HDP\n");
#pragma omp parallel sections
    {
#pragma omp section
        {
            fprintf(stderr, "Updating Template HDP from alignments...\n");
            NanoporeHDP *nHdpT = loadNanoporeHdpFromScratch(type, templateModelFile, kmerLength,
                                                            baseGamma, middleGamma, leafGamma,
                                                            baseGammaAlpha, baseGammaBeta,
                                                            middleGammaAlpha, middleGammaBeta,
                                                            leafGammaAlpha, leafGammaBeta,
                                                            samplingGridStart, samplingGridEnd, samplingGridLength);
            update_nhdp_from_alignment_with_filter(nHdpT, alignments, FALSE, "t");
            /* bug fix: format string lacked spaces after the PRId64 fields,
             * printing e.g. "100samples, 10burn in, 2thinning" */
            fprintf(stderr, "Running Gibbs for template doing %" PRId64 " samples, %" PRId64 " burn in, %" PRId64 " thinning.\n",
                    nbSamples, burnIn, thinning);
            execute_nhdp_gibbs_sampling(nHdpT, nbSamples, burnIn, thinning, verbose);
            finalize_nhdp_distributions(nHdpT);
            fprintf(stderr, "Serializing template to %s...\n", templateHDP);
            serialize_nhdp(nHdpT, templateHDP);
            destroy_nanopore_hdp(nHdpT);
        }
#pragma omp section
        {
            fprintf(stderr, "Updating Complement HDP from alignments...\n");
            NanoporeHDP *nHdpC = loadNanoporeHdpFromScratch(type, complementModelFile, kmerLength,
                                                            baseGamma, middleGamma, leafGamma,
                                                            baseGammaAlpha, baseGammaBeta,
                                                            middleGammaAlpha, middleGammaBeta,
                                                            leafGammaAlpha, leafGammaBeta,
                                                            samplingGridStart, samplingGridEnd, samplingGridLength);
            update_nhdp_from_alignment_with_filter(nHdpC, alignments, FALSE, "c");
            fprintf(stderr, "Running Gibbs for complement doing %" PRId64 " samples, %" PRId64 " burn in, %" PRId64 " thinning.\n",
                    nbSamples, burnIn, thinning);
            execute_nhdp_gibbs_sampling(nHdpC, nbSamples, burnIn, thinning, verbose);
            finalize_nhdp_distributions(nHdpC);
            fprintf(stderr, "Serializing complement to %s...\n", complementHDP);
            serialize_nhdp(nHdpC, complementHDP);
            destroy_nanopore_hdp(nHdpC);
        }
    }
}
/*
 * One-directional variant of nanoporeHdp_buildNanoporeHdpFromAlignment:
 * builds only the template-strand HDP from scratch, trains it on the
 * alignment file, runs Gibbs sampling, finalizes the distributions, and
 * serializes the model to templateHDP.
 */
void nanoporeHdp_buildOneDHdpFromAlignment(NanoporeHdpType type, int64_t kmerLength,
                                           const char *templateModelFile,
                                           const char *alignments,
                                           const char *templateHDP,
                                           int64_t nbSamples, int64_t burnIn, int64_t thinning, bool verbose,
                                           double baseGamma, double middleGamma, double leafGamma,
                                           double baseGammaAlpha, double baseGammaBeta,
                                           double middleGammaAlpha, double middleGammaBeta,
                                           double leafGammaAlpha, double leafGammaBeta,
                                           double samplingGridStart, double samplingGridEnd,
                                           int64_t samplingGridLength) {
    fprintf(stderr, "Updating Template HDP from alignments...\n");
    NanoporeHDP *nHdpT = loadNanoporeHdpFromScratch(type, templateModelFile, kmerLength,
                                                    baseGamma, middleGamma, leafGamma,
                                                    baseGammaAlpha, baseGammaBeta,
                                                    middleGammaAlpha, middleGammaBeta,
                                                    leafGammaAlpha, leafGammaBeta,
                                                    samplingGridStart, samplingGridEnd, samplingGridLength);
    update_nhdp_from_alignment_with_filter(nHdpT, alignments, FALSE, "t");
    /* bug fix: format string lacked spaces after the PRId64 fields */
    fprintf(stderr, "Running Gibbs for template doing %" PRId64 " samples, %" PRId64 " burn in, %" PRId64 " thinning.\n",
            nbSamples, burnIn, thinning);
    execute_nhdp_gibbs_sampling(nHdpT, nbSamples, burnIn, thinning, verbose);
    finalize_nhdp_distributions(nHdpT);
    fprintf(stderr, "Serializing template to %s...\n", templateHDP);
    serialize_nhdp(nHdpT, templateHDP);
    destroy_nanopore_hdp(nHdpT);
}
|
GB_unaryop__minv_fp64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_uint8
// op(A') function: GB_tran__minv_fp64_uint8
// C type: double
// A type: uint8_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
// input (A) matrix entry type
#define GB_ATYPE \
uint8_t
// output (C) matrix entry type
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// addressing of an entry in the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1. / (double) Ax [p] for all p in [0, anz).  Returns GrB_NO_VALUE
// when the operator is compile-time disabled (GB_DISABLE), GrB_SUCCESS
// otherwise.  Work is split statically over nthreads OpenMP threads.
GrB_Info GB_unop__minv_fp64_uint8
(
double *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = 1. / (double) Ax [p]; an aij of 0 yields +Inf per IEEE 754
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((double) A'): the transpose/typecast/apply loop itself lives in
// the shared template GB_unaryop_transpose.c, specialized here via the GB_*
// macros defined above.  Returns GrB_NO_VALUE when compile-time disabled.
GrB_Info GB_tran__minv_fp64_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
observables.h | /***************************************************************************
* Copyright (C) 2009-2013 by Florian Goth *
* fgoth@wthp095 *
* *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: *
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. *
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. *
* * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR *
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, *
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
***************************************************************************/
#ifndef OBSERVABLES_H
#define OBSERVABLES_H
#include "AverageSign.h"
#include "ObservableBase.h"
#include "ObservableContainer.h"
#include "Parameters.h"
#include "Greensfunction.h"
#include "libFourier.h"
#include <valarray>
#ifdef _OPENMP
#include <omp.h>
#endif
#define TWOPI 2.0*M_PIl
/// Bundles the template parameters every observable needs: the Monte Carlo
/// configuration type, the sign/phase type, and the communication backend.
template <class C, class S, class Communication>
struct Observable_Parameters
{
typedef C Configuration;///< the Monte Carlo configuration type
typedef S SignType;///< type of the configuration's sign/phase
typedef Communication Comm;///< communication backend used by Network_Cache
};
/**
Dry-run companion of densitydensityCorrelation(): registers with @a func every
single-particle Green's function entry that the Wick expansion of
<n_{spin_i}(site_i, s_i) n_{spin_j}(site_j, s_j)> evaluates, without computing
anything.  The branch structure mirrors densitydensityCorrelation() exactly.
(Cleanup: removed the unused local constant `tiny`.)
*/
template <class Configuration>
void densitydensityCorrelation_dry(DryRun<typename Configuration::value_type, typename Configuration::DetType>& func,
int site_i, const typename DryRun<typename Configuration::value_type, typename Configuration::DetType>::FPType s_i, int spin_i,
int site_j, const typename DryRun<typename Configuration::value_type, typename Configuration::DetType>::FPType s_j, int spin_j)
{
    if (spin_i == UP)
    {
        if (spin_j == DOWN)
        {
            //UP-DOWN: two equal-sector densities, no cross terms
            func.template onSector<UP>(site_i, s_i, site_i, s_i);
            func.template onSector<DOWN>(site_j, s_j, site_j, s_j);
        }
        else
        {
            //UP-UP: full 2x2 Wick determinant unless both operators coincide
            if ((site_i != site_j) || !fpequal(s_i, s_j))
            {
                func.template onSector<UP>(site_i, s_i, site_i, s_i);
                func.template onSector<UP>(site_j, s_j, site_j, s_j);
                func.template onSector<UP>(site_j, s_j, site_i, s_i);
                func.template onSector<UP>(site_i, s_i, site_j, s_j);
            }
            else
            {
                // coinciding operators: only the equal-time entry is needed
                func.template onSector<UP>(site_i, s_i, site_i, s_i);
            }
        }
    }
    else
    {
        if (spin_j == DOWN)
        {
            if ((site_i != site_j) || !fpequal(s_i, s_j))
            {
                //DOWN-DOWN: full 2x2 Wick determinant
                func.template onSector<DOWN>(site_i, s_i, site_i, s_i);
                func.template onSector<DOWN>(site_j, s_j, site_j, s_j);
                func.template onSector<DOWN>(site_j, s_j, site_i, s_i);
                func.template onSector<DOWN>(site_i, s_i, site_j, s_j);
            }
            else
            {
                // coinciding operators: only the equal-time entry is needed
                func.template onSector<DOWN>(site_i, s_i, site_i, s_i);
            }
        }
        else
        {
            //DOWN - UP: two equal-sector densities, no cross terms
            func.template onSector<DOWN>(site_i, s_i, site_i, s_i);
            func.template onSector<UP>(site_j, s_j, site_j, s_j);
        }
    }
    return;
}
/**
Evaluates the density-density correlator
<n_{spin_i}(site_i, s_i) n_{spin_j}(site_j, s_j)> via Wick's theorem using the
single-particle Green's functions provided by @a dowick.  For opposite spins
the sectors factorize; for equal spins the 2x2 Wick determinant is used, with
the degenerate coinciding-operator case handled separately (the equal-time
Green's function already contains the full answer there).
(Cleanup: removed the unused local constant `tiny`.)
*/
template <class Configuration>
inline typename Configuration::DetType densitydensityCorrelation(const DoWick<typename Configuration::value_type, typename Configuration::DetType>& dowick, int site_i, typename DoWick<typename Configuration::value_type, typename Configuration::DetType>::FPType s_i, int spin_i, int site_j, typename DoWick<typename Configuration::value_type, typename Configuration::DetType>::FPType s_j, int spin_j)
{
    typename Configuration::DetType retval;
    if (spin_i == UP)
    {
        if (spin_j == DOWN)
        {
            //UP-DOWN: sectors factorize
            retval = dowick.template onSector<UP>(site_i, s_i, site_i, s_i)
                     * dowick.template onSector<DOWN>(site_j, s_j, site_j, s_j);
        }
        else
        {
            //UP-UP
            if ((site_i != site_j) || !fpequal(s_i, s_j))
            {
                retval = (dowick.template onSector<UP>(site_i, s_i, site_i, s_i)
                          * dowick.template onSector<UP>(site_j, s_j, site_j, s_j)
                          - dowick.template onSector<UP>(site_j, s_j, site_i, s_i)
                          * dowick.template onSector<UP>(site_i, s_i, site_j, s_j));
            }
            else
            {//special care for equaltime greensfunction
                retval = dowick.template onSector<UP>(site_i, s_i, site_i, s_i);
            }
        }
    }
    else
    {
        if (spin_j == DOWN)
        {
            if ((site_i != site_j) || !fpequal(s_i, s_j))
            {
                //DOWN-DOWN
                retval = (dowick.template onSector<DOWN>(site_i, s_i, site_i, s_i)
                          * dowick.template onSector<DOWN>(site_j, s_j, site_j, s_j)
                          - dowick.template onSector<DOWN>(site_j, s_j, site_i, s_i)
                          * dowick.template onSector<DOWN>(site_i, s_i, site_j, s_j)
                         );
            }
            else
            {//special care for equaltime greensfunction
                retval = dowick.template onSector<DOWN>(site_i, s_i, site_i, s_i);
            }
        }
        else
        {
            //DOWN - UP: sectors factorize
            retval = dowick.template onSector<DOWN>(site_i, s_i, site_i, s_i)
                     * dowick.template onSector<UP>(site_j, s_j, site_j, s_j);
        }
    }
    return retval;
}
/**
A "generic" twoparticle Greensfunction of this form:
G = <c^dagger_a c_b c^dagger_c c_d >
*/
/**
Dry-run companion of genericTwoParticleGreensfunction(): registers with
@a func the single-particle contractions needed for
G = <c^dagger_a c_b c^dagger_c c_d>, without evaluating anything.
*/
template <class Configuration>
void genericTwoParticleGreensfunction_dry(DryRun<typename Configuration::value_type, typename Configuration::DetType>& func,
const typename Configuration::value_type& va,
const typename Configuration::value_type& vb,
const typename Configuration::value_type& vc,
const typename Configuration::value_type& vd)
{
    // Degenerate case: all four operators coincide, so only the single
    // equal-time entry is required.
    if ((vb == vc) && (va == vb) && (vc == vd))
    {
        func(va, va);
        return;
    }
    // General case: the four contractions of the 2x2 Wick determinant.
    func(va, vb);
    func(va, vd);
    func(vc, vb);
    func(vc, vd);
}
/**
A "generic" twoparticle Greensfunction:
It is like that:
G = <c^dagger_a c_b c^dagger_c c_d >
*/
/**
Evaluates the "generic" two-particle Green's function
G = <c^dagger_a c_b c^dagger_c c_d> via the 2x2 Wick determinant.  When all
four operators coincide, the equal-time Green's function is returned directly.
*/
template <class Configuration>
typename Configuration::DetType genericTwoParticleGreensfunction(const DoWick<typename Configuration::value_type, typename Configuration::DetType>& dowick,
typename Configuration::value_type va,
typename Configuration::value_type vb,
typename Configuration::value_type vc,
typename Configuration::value_type vd)
{
    // Degenerate case: all four operators are identical.
    if ((vb == vc) && (va == vb) && (vc == vd))
        return dowick(va, va);
    // Wick determinant: G(a,b)G(c,d) - G(a,d)G(c,b).
    return dowick(va, vb) * dowick(vc, vd) - dowick(va, vd) * dowick(vc, vb);
}
/**
A class for measuring the average Order.
*/
template <class Config>
class AverageOrder : public Network_Cache<Config, typename Config::SignType>
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Config::Configuration::FPType FPType;
typedef typename Config::SignType SignType;
typedef typename Config::SignType ObservableType;///< the AverageOrder is essentially a floating Point type, but during calculations it can be complex
/**
The Constructor for the Average Order
@param n the communication backend handed to Network_Cache for binning
*/
AverageOrder(typename Config::Comm& n) throw() : Network_Cache<Config, typename Config::SignType>(n, "AverageOrder"), averageorder(0.0), configurationLength(0.0)
{
}
// No Green's function entries are needed for this observable, hence a no-op.
inline void dryrun(DryRun<typename Configuration::value_type, SignType>&) {}
/**
This determines the Average Order for the given configuration
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, SignType>&);
private:
SignType averageorder;///< here we store the physical AverageOrder
FPType configurationLength;///< this is the real Average Order of the Data Structure
};
/**
Accumulates the expansion order of the configuration, weighted by its phase
(as for every other observable, the configuration's sign must be folded in),
and hands the weighted sample to the binning machinery.
*/
template <class Config>
void AverageOrder<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, SignType>&)
{
    const FPType order = static_cast<FPType>(configuration.size());
    configurationLength += order;            // raw (unsigned) order of the data structure
    const SignType weighted = order * configuration.phase;
    averageorder += weighted;                // phase-weighted physical average order
    this->add_bin(weighted);
}
/**
A class for measuring the ParticleNumber at a certain hardcoded time
*/
template <class Config>
class ParticleNumber : public Network_Cache<Config, typename Config::SignType>
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType SignType;
typedef SignType ObservableType;///< the ParticleNumber is essentially a floating point type, but during calculations it can be complex
/**
The Constructor for the Average Order
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; t_exp and N are captured BY REFERENCE
*/
ParticleNumber(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, typename Config::SignType>(n, "ParticleNumber"), t_M(params.t_exp), len(params.N)
{
}
void dryrun(DryRun<typename Configuration::value_type, SignType>&);
/**
This determines the Average Order for the given Configuration
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, SignType>&);
private:
// NOTE(review): both members are references into the Parameters object passed
// to the constructor — that object must outlive this observable; confirm at call sites.
const double& t_M;///< measurement time (params.t_exp)
const uint32_t& len;///< number of lattice sites (params.N)
};
/**
Sums the equal-time densities <n(site)> of both spin sectors at time t_M over
all sites, weights the total with the configuration's phase, and bins it.
*/
template <class Config>
void ParticleNumber<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, SignType>& func)
{
    // Seed the accumulator with site 0, then add the remaining sites.
    ObservableType total(func.template onSector<UP>(0, t_M, 0, t_M));
    total += func.template onSector<DOWN>(0, t_M, 0, t_M);
    for (unsigned int site = 1; site < len; ++site)
    {
        total += func.template onSector<UP>(site, t_M, site, t_M);
        total += func.template onSector<DOWN>(site, t_M, site, t_M);
    }
    total *= configuration.phase;
    this->add_bin(total);
}
/**
Registers with @a func every equal-time Green's function entry that
evaluate() later reads: <UP> and <DOWN> densities at time t_M on all sites.
(Cleanup: removed the unused local typedef `Vertex`.)
*/
template <class Config>
void ParticleNumber<Config>::dryrun(DryRun<typename Configuration::value_type, SignType>& func)
{
    for (unsigned int k = 0; k < len; ++k)
    {
        func.template onSector<UP>(k, t_M, k, t_M);
        func.template onSector<DOWN>(k, t_M, k, t_M);
    }
}
/**
A class for measuring the Total Double Occupancy at a certain hardcoded time
*/
template <class Config>
class TotalDoubleOccupancy : public Network_Cache<Config, typename Config::SignType>
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef GFRetVal ObservableType;///< the ParticleNumber is essentially a floating Point type, but during calculations it can be complex
/**
The Constructor for the Average Order
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; t_exp and N are captured BY REFERENCE
*/
TotalDoubleOccupancy(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, typename Config::SignType>(n, "TotalDoubleOccupancy"), t_M(params.t_exp), len(params.N)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
This determines the Average Order for the given Configuration
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): references into the Parameters object passed to the
// constructor — the Parameters instance must outlive this observable.
const double& t_M;///< measurement time (params.t_exp); unused by the time-0 dryrun/evaluate pair below
const uint32_t& len;///< number of lattice sites (params.N)
};
/**
Registers with @a func the contractions of <n_up(site) n_down(site)> at time 0
for every site, mirroring exactly what evaluate() computes.
*/
template <class Config>
void TotalDoubleOccupancy<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    typedef typename Configuration::value_type Vertex;
    for (unsigned int site = 0; site < len; ++site)
    {
        Vertex up(site, 0, UP);
        Vertex down(site, 0, DOWN);
        genericTwoParticleGreensfunction_dry<Configuration>(func, up, up, down, down);
    }
}
/**
Sums <n_up(site) n_down(site)> at time 0 over all sites via the generic
two-particle Green's function, weights the total with the configuration's
phase, and bins it.
*/
template <class Config>
void TotalDoubleOccupancy<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& func)
{
    typedef typename Configuration::value_type Vertex;
    // Seed the accumulator with site 0, then add the remaining sites.
    Vertex up0(0, 0, UP);
    Vertex down0(0, 0, DOWN);
    ObservableType total(genericTwoParticleGreensfunction<Configuration>(func, up0, up0, down0, down0));
    for (unsigned int site = 1; site < len; ++site)
    {
        Vertex up(site, 0, UP);
        Vertex down(site, 0, DOWN);
        total += genericTwoParticleGreensfunction<Configuration>(func, up, up, down, down);
    }
    total *= configuration.phase;
    this->add_bin(total);
}
/**
A class for measuring the time dependency of the kinetic energy as defined in the Hubbard model
*/
template <class Config>
class KineticEnergy : public Network_Cache<Config, std::valarray<typename Config::SignType> >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> ObservableType;///< Kinetic Energy is a function-like observable in realtime evolution
/**
The Constructor for the Average Order
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; N, functionpoints and t are captured BY REFERENCE, delta_s by value
*/
KineticEnergy(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "KineticEnergy"), len(params.N), functionpoints(params.functionpoints), t(params.t), delta_s(params.delta_s)
{}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
This determines the Average Order for the given Configuration
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len/functionpoints/t are references into the constructor's
// Parameters object (it must outlive this observable); delta_s is a copy.
const uint32_t& len;///< number of lattice sites (params.N)
const uint32_t& functionpoints;///< number of time slices measured
const double& t;///< hopping amplitude (params.t)
const double delta_s;///< spacing between measured time slices
};
/**
Registers with @a func every hopping Green's function entry that evaluate()
later reads: for each time slice and each site, the left and right
nearest-neighbour contractions on the periodic chain, in both spin sectors.
(Cleanup: removed the unused local typedef `Vertex`.)
*/
template <class Config>
void KineticEnergy<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    for (unsigned int j = 0; j < functionpoints; ++j)
    {
        const FPType s = j * delta_s;
        for (unsigned int k = 0; k < len; ++k)
        {
            func.template onSector<UP>(k, s, (k + 1)%len, s);
            func.template onSector<UP>(k, s, (len + k - 1)%len, s);
            func.template onSector<DOWN>(k, s, (k + 1)%len, s);
            func.template onSector<DOWN>(k, s, (len + k - 1)%len, s);
        }
    }
}
/**
Measures E_kin(s) = -t * sum over sites/spins of the two nearest-neighbour
hopping contractions on the periodic chain, for every time slice, weighted by
the configuration's phase, and bins the resulting function of time.
*/
template <class Config>
void KineticEnergy<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    std::valarray<GFRetVal> vals(functionpoints);
    for (unsigned int slice = 0; slice < functionpoints; ++slice)
    {
        const FPType s = slice * delta_s;
        GFRetVal hopping = 0;
        for (unsigned int site = 0; site < len; ++site)
        {
            const unsigned int right = (site + 1) % len;          // periodic neighbour
            const unsigned int left = (len + site - 1) % len;     // periodic neighbour
            hopping += dowick.template onSector<UP>(site, s, right, s);
            hopping += dowick.template onSector<UP>(site, s, left, s);
            hopping += dowick.template onSector<DOWN>(site, s, right, s);
            hopping += dowick.template onSector<DOWN>(site, s, left, s);
        }
        vals[slice] = hopping * (static_cast<FPType>(-t) * configuration.phase);
    }
    this->add_bin(vals);
}
/**
A class for measuring the time dependency of the Magnetization
*/
template <class Config>
class Magnetization : public Network_Cache<Config, std::valarray<typename Config::SignType> >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> ObservableType;///< the Magnetization is a function of the time
/**
The Constructor for the Magnetization
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; N and functionpoints are captured BY REFERENCE, delta_s by value
*/
Magnetization(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "Magnetization"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the Magnetization for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len/functionpoints reference the constructor's Parameters
// object (must outlive this observable); delta_s is a copy.
const uint32_t& len;///< number of lattice sites (params.N)
const uint32_t& functionpoints;///< number of time slices measured
const double delta_s;///< spacing between measured time slices
};
/**
Registers with @a func the equal-time density entries <n_up(k,s)> and
<n_down(k,s)> on every site and time slice, matching evaluate() exactly.
(Cleanup: removed the unused local typedef `Vertex`.)
*/
template <class Config>
void Magnetization<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    for (unsigned int j = 0; j < functionpoints; ++j)
    {
        const FPType s = j * delta_s;
        for (unsigned int k = 0; k < len; ++k)
        {
            func.template onSector<UP>(k, s, k, s);
            func.template onSector<DOWN>(k, s, k, s);
        }
    }
}
/**
Measures m(s) = sum_k ( <n_up(k,s)> - <n_down(k,s)> ) for every time slice,
weighted by the configuration's phase, and bins the resulting function.
*/
template <class Config>
void Magnetization<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    std::valarray<GFRetVal> vals(functionpoints);
    for (unsigned int slice = 0; slice < functionpoints; ++slice)
    {
        const FPType s = slice * delta_s;
        GFRetVal magnetization = 0;
        for (unsigned int site = 0; site < len; ++site)
            magnetization += dowick.template onSector<UP>(site, s, site, s) - dowick.template onSector<DOWN>(site, s, site, s);
        vals[slice] = magnetization * configuration.phase;
    }
    this->add_bin(vals);
}
/**
A class for measuring the time dependency of the Eta-Pairing
*/
template <class Config>
class EtaPairing : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<std::complex<FPType> > Function;//Eta-Pairing in k-space can be complex
typedef std::valarray<Function> ObservableType;///< The Eta-Pairing is a time-dependent, momentum-resolved observable
/**
The Constructor for the Eta-Pairing
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; N and functionpoints are captured BY REFERENCE, delta_s by value
*/
EtaPairing(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "EtaPairing"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the Eta-Pairing for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len/functionpoints reference the constructor's Parameters
// object (must outlive this observable); delta_s is a copy.
const uint32_t& len;///< number of lattice sites (params.N)
const uint32_t& functionpoints;///< number of time slices measured
const double delta_s;///< spacing between measured time slices
};
// Registers with func every pair of equal-time Green's function entries
// G_up(a,s;d,s) * G_down(a,s;d,s) that evaluate() reads.
template <class Config>
void EtaPairing<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
// NOTE(review): the loop body does not depend on q, so the same registrations
// are issued len times over; presumably DryRun ignores duplicates, in which
// case the outer q loop could be dropped — confirm before changing.
for (int q = 0; q < static_cast<int>(len); ++q)//for every k-space-value q
{
for (unsigned int j = 0; j < functionpoints; ++j)//for every timeslice
{
const FPType s = j * delta_s;//the realtime
for (int a = 0; a < static_cast<int>(len); ++a)
for (int d = 0; d < static_cast<int>(len); ++d)
{
func.template onSector<UP>(a, s, d, s);
func.template onSector<DOWN>(a, s, d, s);
}
}
}
return;
}
/**
Measures the momentum-resolved eta-pairing correlator: for every momentum q
and time slice s it sums exp(i*2*pi*q*(d-a)/len) * G_up(a,s;d,s) *
G_down(a,s;d,s) over all site pairs (a,d), weights each momentum channel by
the configuration's phase, and bins the result.
(Improvement: the loop-invariant conversion of len to floating point is
hoisted out of the loops instead of being recomputed per time slice.)
*/
template <class Config>
void EtaPairing<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType func(len);
    const FPType lenf = static_cast<FPType>(len);// invariant across all loops
    for (int q = 0; q < static_cast<int>(len); ++q)//for every k-space-value q
    {
        func[q].resize(functionpoints);
        for (unsigned int j = 0; j < functionpoints; ++j)//for every timeslice
        {
            const FPType s = j * delta_s;//the realtime
            std::complex<FPType> t1 = 0;
            for (int a = 0; a < static_cast<int>(len); ++a)
                for (int d = 0; d < static_cast<int>(len); ++d)
                {
                    // Fourier phase factor exp(i*2*pi*q*(d-a)/len)
                    std::complex<FPType> pref = std::exp(std::complex<FPType>(0.0, static_cast<FPType>(TWOPI*q)/lenf*(d-a)) );
                    t1 += pref * dowick.template onSector<UP>(a, s, d, s) * dowick.template onSector<DOWN>(a, s, d, s);
                }
            //add to measurement
            func[q][j] = t1;
        }
        func[q] *= configuration.phase;
    }
    this->add_bin(func);
    return;
}
/**
A class for measuring the time dependency of the Eta-Pairing
*/
template <class Config>
class EtaPairing_Real : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< The Eta-Pairing is a time-dependent observable
/**
The Constructor for the Eta-Pairing
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; N and functionpoints are captured BY REFERENCE, delta_s by value
*/
EtaPairing_Real(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "EtaPairing_Real"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the Eta-Pairing for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len/functionpoints reference the constructor's Parameters
// object (must outlive this observable); delta_s is a copy.
const uint32_t& len;///< number of lattice sites (params.N)
const uint32_t& functionpoints;///< number of time slices measured
const double delta_s;///< spacing between measured time slices
};
/**
Registers with @a func the two equal-time entries G_up(q,s;0,s) and
G_down(q,s;0,s) that evaluate() reads, for every site q and time slice.
(Consistency: loop over q as uint32_t, like evaluate(), instead of int with
static_cast — the visited values are identical.)
*/
template <class Config>
void EtaPairing_Real<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    for (uint32_t q = 0; q < len; ++q)//for every lattice site q
    {
        for (unsigned int j = 0; j < functionpoints; ++j)//for every timeslice
        {
            const FPType s = j * delta_s;//the realtime
            func.template onSector<UP>(q, s, 0, s);
            func.template onSector<DOWN>(q, s, 0, s);
        }
    }
    return;
}
/**
Measures the real-space eta-pairing correlator G_up(q,s;0,s) * G_down(q,s;0,s)
for every site q and time slice s, weights each site's function of time by the
configuration's phase, and bins the result.
*/
template <class Config>
void EtaPairing_Real<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType result(len);
    for (uint32_t q = 0; q < len; ++q)
    {
        result[q].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType s = slice * delta_s;//the realtime
            result[q][slice] = dowick.template onSector<UP>(q, s, 0, s)
                               * dowick.template onSector<DOWN>(q, s, 0, s);
        }
        result[q] *= configuration.phase;
    }
    this->add_bin(result);
}
/**
A class for measuring the time dependency of the Spin-Spin-Correlation
*/
template <class Config>
class SpinSpinCorrelation : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::Comm Net;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< Spin-Spin-Correlations are spatially resolved time-dependent observables
/**
The Constructor for the Spin-Spin-Correlation
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; N and functionpoints are captured BY REFERENCE, delta_s by value
*/
SpinSpinCorrelation(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SpinSpinCorrelation"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the Spin-Spin-Correlation for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len/functionpoints reference the constructor's Parameters
// object (must outlive this observable); delta_s is a copy.
const uint32_t& len;///< number of lattice sites (params.N)
const uint32_t& functionpoints;///< number of time slices measured
const double delta_s;///< spacing between measured time slices
};
/**
Registers with @a func all density-density contractions between site 0 and
every site, in all four spin combinations, on each time slice — exactly the
entries that evaluate() later reads.
*/
template <class Config>
void SpinSpinCorrelation<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType s = slice * delta_s;
            densitydensityCorrelation_dry<Configuration>(func, 0, s, UP, site, s, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, UP, site, s, DOWN);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, DOWN, site, s, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, DOWN, site, s, DOWN);
        }
    }
}
/**
Measures <S_z(0,s) S_z(k,s)> for every site k and time slice s as the signed
combination of the four density-density correlators, divided by 4 and
weighted by the configuration's phase, then bins the result.
*/
template <class Config>
void SpinSpinCorrelation<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType result(len);
    for (unsigned int site = 0; site < len; ++site)
    {
        result[site].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType s = slice * delta_s;
            const GFRetVal upup = densitydensityCorrelation<Configuration>(dowick, 0, s, UP, site, s, UP);
            const GFRetVal updown = densitydensityCorrelation<Configuration>(dowick, 0, s, UP, site, s, DOWN);
            const GFRetVal downup = densitydensityCorrelation<Configuration>(dowick, 0, s, DOWN, site, s, UP);
            const GFRetVal downdown = densitydensityCorrelation<Configuration>(dowick, 0, s, DOWN, site, s, DOWN);
            result[site][slice] = (upup - updown - downup + downdown) * configuration.phase / static_cast<FPType>(4.0);
        }
    }
    this->add_bin(result);
}
/**
A class for measuring the time dependency of the correlated part of the Spin-Spin-Correlation
*/
template <class Config>
class SpinSpinCorrelatedPart : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::Comm Net;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< Spin-Spin-Correlations are spatially resolved time-dependent observables
/**
The Constructor for the correlated part Spin-Spin-Correlation
@param n the communication backend handed to Network_Cache for binning
@param params simulation parameters; N and functionpoints are captured BY REFERENCE, delta_s by value
*/
inline SpinSpinCorrelatedPart(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SpinSpinCorrelatedPart"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the Spin-Spin-Correlation for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len/functionpoints reference the constructor's Parameters
// object (must outlive this observable); delta_s is a copy.
const uint32_t& len;///< number of lattice sites (params.N)
const uint32_t& functionpoints;///< number of time slices measured
const double delta_s;///< spacing between measured time slices
};
template <class Config>
void SpinSpinCorrelatedPart<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register with the dry-run bookkeeping every Wick contraction that
    // evaluate() will later request.
    for (unsigned int k = 0; k < len; ++k)//for each site
    {
        for (unsigned int j = 0; j < functionpoints; ++j)//for every time-slice
        {
            const FPType s = j * delta_s;
            densitydensityCorrelation_dry<Configuration>(func, 0, s, UP, k, s, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, UP, k, s, DOWN);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, DOWN, k, s, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, DOWN, k, s, DOWN);
            // evaluate() reads the local density in BOTH spin sectors at
            // site 0 AND at site k; the original only registered UP at
            // site 0 and DOWN at site k, leaving two of the four onSector
            // calls unregistered.
            func.template onSector<UP>(0, s, 0, s);
            func.template onSector<DOWN>(0, s, 0, s);
            func.template onSector<UP>(k, s, k, s);
            func.template onSector<DOWN>(k, s, k, s);
        }
    }
    return;
}
template <class Config>
void SpinSpinCorrelatedPart<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Connected part of the spin-spin correlation: the four density-density
    // contractions minus the products of the single-site densities.
    ObservableType bins(len);
    for (unsigned int site = 0; site < len; ++site)
    {
        bins[site].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            // local densities at the reference site 0 and at the probed site
            GFRetVal n0up = dowick.template onSector<UP>(0, tau, 0, tau);
            GFRetVal n0dn = dowick.template onSector<DOWN>(0, tau, 0, tau);
            GFRetVal nkup = dowick.template onSector<UP>(site, tau, site, tau);
            GFRetVal nkdn = dowick.template onSector<DOWN>(site, tau, site, tau);
            GFRetVal connected = densitydensityCorrelation<Configuration>(dowick, 0, tau, UP, site, tau, UP)
                               - densitydensityCorrelation<Configuration>(dowick, 0, tau, UP, site, tau, DOWN)
                               - densitydensityCorrelation<Configuration>(dowick, 0, tau, DOWN, site, tau, UP)
                               + densitydensityCorrelation<Configuration>(dowick, 0, tau, DOWN, site, tau, DOWN)
                               -n0up*nkup + n0up*nkdn + n0dn * nkup - n0dn * nkdn;
            //add to measurement
            bins[site][slice] = connected * configuration.phase/ static_cast<FPType>(4.0);
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the time dependency of the Charge-Charge-Correlation
*/
template <class Config>
class ChargeChargeCorrelation : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< Charge-Charge-Correlations are spatially resolved time-dependent observables
/**
The Constructor for the Charge-Charge-Correlation
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
ChargeChargeCorrelation(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "ChargeChargeCorrelation"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the contractions used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the Charge-Charge-Correlation for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config>
void ChargeChargeCorrelation<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Announce the four equal-time density-density contractions between
    // site 0 and every site, for every time slice evaluate() will use.
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            densitydensityCorrelation_dry<Configuration>(func, 0, tau, UP, site, tau, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, tau, UP, site, tau, DOWN);
            densitydensityCorrelation_dry<Configuration>(func, 0, tau, DOWN, site, tau, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, tau, DOWN, site, tau, DOWN);
        }
    }
}
template <class Config>
void ChargeChargeCorrelation<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // C(k, s) = sum over the four spin combinations of <n_0(s) n_k(s)>,
    // weighted with the configuration's phase.
    ObservableType bins(len);
    for (unsigned int site = 0; site < len; ++site)
    {
        bins[site].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            GFRetVal corr = densitydensityCorrelation<Configuration>(dowick, 0, tau, UP, site, tau, UP)
                          + densitydensityCorrelation<Configuration>(dowick, 0, tau, UP, site, tau, DOWN)
                          + densitydensityCorrelation<Configuration>(dowick, 0, tau, DOWN, site, tau, UP)
                          + densitydensityCorrelation<Configuration>(dowick, 0, tau, DOWN, site, tau, DOWN);
            //add to measurement
            bins[site][slice] = corr * configuration.phase;
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the time dependency of the correlated part of the Charge-Charge-Correlation
*/
template <class Config>
class ChargeChargeCorrelatedPart : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::Comm Net;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< Charge-Charge-Correlations are spatially resolved time-dependent observables
/**
The Constructor for the correlated part of the Charge-Charge-Correlation
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
ChargeChargeCorrelatedPart(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "ChargeChargeCorrelatedPart"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the contractions used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the correlated part of the Charge-Charge-Correlation for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config>
void ChargeChargeCorrelatedPart<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register with the dry-run bookkeeping every Wick contraction that
    // evaluate() will later request.
    for (unsigned int k = 0; k < len; ++k)//for each site
    {
        for (unsigned int j = 0; j < functionpoints; ++j)//for every time-slice
        {
            const FPType s = j * delta_s;
            densitydensityCorrelation_dry<Configuration>(func, 0, s, UP, k, s, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, UP, k, s, DOWN);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, DOWN, k, s, UP);
            densitydensityCorrelation_dry<Configuration>(func, 0, s, DOWN, k, s, DOWN);
            // evaluate() reads the local density in BOTH spin sectors at
            // site 0 AND at site k; the original only registered UP at
            // site 0 and DOWN at site k, leaving two of the four onSector
            // calls unregistered.
            func.template onSector<UP>(0, s, 0, s);
            func.template onSector<DOWN>(0, s, 0, s);
            func.template onSector<UP>(k, s, k, s);
            func.template onSector<DOWN>(k, s, k, s);
        }
    }
    return;
}
template <class Config>
void ChargeChargeCorrelatedPart<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Connected part of the charge-charge correlation: the four
    // density-density contractions minus all products of the single-site
    // densities.
    ObservableType bins(len);
    for (unsigned int site = 0; site < len; ++site)
    {
        bins[site].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            // local densities at the reference site 0 and at the probed site
            GFRetVal n0up = dowick.template onSector<UP>(0, tau, 0, tau);
            GFRetVal n0dn = dowick.template onSector<DOWN>(0, tau, 0, tau);
            GFRetVal nkup = dowick.template onSector<UP>(site, tau, site, tau);
            GFRetVal nkdn = dowick.template onSector<DOWN>(site, tau, site, tau);
            GFRetVal connected = densitydensityCorrelation<Configuration>(dowick, 0, tau, UP, site, tau, UP)
                               + densitydensityCorrelation<Configuration>(dowick, 0, tau, UP, site, tau, DOWN)
                               + densitydensityCorrelation<Configuration>(dowick, 0, tau, DOWN, site, tau, UP)
                               + densitydensityCorrelation<Configuration>(dowick, 0, tau, DOWN, site, tau, DOWN)
                               - n0up*nkup
                               - n0dn*nkup
                               - n0up*nkdn
                               - n0dn*nkdn;
            //add to measurement
            bins[site][slice] = connected * configuration.phase;
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the time dependency of the k-space resolved particle density. the dependency on the spin is summed out.
It measures \sum_r <c_r^\dagger c_0 >
*/
template <class Config>
class kSpaceDensity : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::Comm Net;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<std::complex<FPType> > Function;
typedef std::valarray<Function> ObservableType;///< the k-space density is a spatially resolved time-dependent observable
/**
The Constructor for the k-space density observable
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
kSpaceDensity(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "kSpaceDensity"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the propagators used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the k-space density for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config>
void kSpaceDensity<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register the equal-time propagators <c_r^dagger c_0> in both spin
    // sectors for every site and time slice used by evaluate().
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            func.template onSector<UP>(site, tau, 0, tau);
            func.template onSector<DOWN>(site, tau, 0, tau);
        }
    }
}
template <class Config>
void kSpaceDensity<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Fourier transform of the equal-time density matrix:
    // n(k, s) = sum_r e^{i 2 pi k r / N} <c_r^dagger(s) c_0(s)>, spin summed.
    ObservableType bins(len);
    for (unsigned int mom = 0; mom < len; ++mom)
    {
        bins[mom].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            std::complex<FPType> acc = 0;
            for (unsigned int site = 0; site < len; ++site)
            {
                std::complex<FPType> term = dowick.template onSector<UP>(site, tau, 0, tau) + dowick.template onSector<DOWN>(site, tau, 0, tau);
                term = term * std::exp(std::complex<FPType>(0.0, static_cast<FPType>(2.0 * M_PIl/len) * mom * site) );
                acc += term;
            }
            //add to measurement
            bins[mom][slice] = acc * configuration.phase;
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the time dependency of the LocalDensityVariance
*/
template <class Config>
class LocalDensityVariance : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< LocalDensityVariance is a spatially resolved time-dependent observable
/**
The Constructor for the LocalDensityVariance
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
LocalDensityVariance(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "LocalDensityVariance"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the contractions used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the LocalDensityVariance for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config>
void LocalDensityVariance<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register the spin-resolved local densities and the up-down double
    // occupancy at every site and time slice used by evaluate().
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            func.template onSector<UP>(site, tau, site, tau);
            func.template onSector<DOWN>(site, tau, site, tau);
            densitydensityCorrelation_dry<Configuration>(func, site, tau, UP, site, tau, DOWN);
        }
    }
}
template <class Config>
void LocalDensityVariance<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Local density fluctuation <n^2> - <n>^2 per site, assembled from the
    // spin-resolved densities and the up-down double occupancy.
    ObservableType func(len);
    for (unsigned int k = 0; k < len; ++k)
    {
        func[k].resize(functionpoints);
        for (unsigned int j = 0; j < functionpoints; ++j)
        {
            const FPType s = j * delta_s;
            GFRetVal nup = dowick.template onSector<UP>(k, s, k, s);
            // BUGFIX: the down-spin density must come from the DOWN sector;
            // the original queried onSector<UP> twice (copy-paste error), as
            // also evidenced by dryrun() registering onSector<DOWN>(k,s,k,s).
            GFRetVal ndown = dowick.template onSector<DOWN>(k, s, k, s);
            GFRetVal nupndown = densitydensityCorrelation<Configuration>(dowick, k, s, UP, k, s, DOWN);
            //add to measurement
            func[k][j] = configuration.phase * (nup*(static_cast<FPType>(1.0) - nup) + ndown*(static_cast<FPType>(1.0) - ndown) + static_cast<FPType>(2.0) * (nupndown - nup*ndown));
        }
    }
    this->add_bin(func);
    return;
}
/**
A class for measuring the time dependency of the Greensfunctions, thus <c_0(0)^\dagger c_r(s)>,
WE MEASURE THE UP-SECTOR!
*/
template <class Config>
class Greensfunction : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< The Greensfunction contains an array of functions
/**
The Constructor for the Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
Greensfunction(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType> (n, "Greensfunction"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the propagators used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config>
void Greensfunction<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register the UP-sector propagators <c_0(0)^dagger c_k(s)> for every
    // site and time slice that evaluate() will measure.
    // (A DOWN-sector registration existed in commented-out form in the
    // original and is intentionally not performed.)
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            func.template onSector<UP>(site, tau, 0, 0);
        }
    }
}
template <class Config>
void Greensfunction<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Time-displaced Green's function G(k, s) = <c_0(0)^dagger c_k(s)>,
    // measured in the UP sector only (see the class comment).
    ObservableType bins(len);
    for (unsigned int site = 0; site < len; ++site)
    {
        bins[site].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            const GFRetVal g = dowick.template onSector<UP>(site, tau, 0, 0);
            //add to measurement
            bins[site][slice] = configuration.phase * g;
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the Diagonal Green's function <c_ks(tau)^\dagger c_ks(0)>,
We employ Time-reversal symmetry to enhance the measurement. In theory it is always a real quantity
*/
template <class Config>
class DiagonalGreensfunction_kspace : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<std::complex<FPType> > Function;
typedef std::valarray<Function> ObservableType;///< The Greensfunction contains an array of functions
/**
The Constructor for the diagonal k-space Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
// BUGFIX: this observable was registered with the Network_Cache under the
// name "Greensfunction", colliding with the Greensfunction class (and with
// OffDiagonalGreensfunction_kspace, which had the same copy-paste name).
// It is now registered under its own name.
DiagonalGreensfunction_kspace(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType> (n, "DiagonalGreensfunction_kspace"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the propagators used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config>
void DiagonalGreensfunction_kspace<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register the real-space propagators in both spin sectors.  evaluate()
    // reads DOWN at the reflected site (len - r) % len, which ranges over
    // exactly the sites registered here.
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            func.template onSector<UP>(site, tau, 0, 0);
            func.template onSector<DOWN>(site, tau, 0, 0);
        }
    }
}
template <class Config>
void DiagonalGreensfunction_kspace<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Fourier transform of the diagonal Green's function.  Time-reversal
    // symmetry is exploited by pairing the UP sector at site r with the
    // DOWN sector at the reflected site (len - r) % len and averaging (0.5).
    ObservableType bins(len);
    FPType invlen = static_cast<FPType>(1.0)/len;
    for (unsigned int mom = 0; mom < len; ++mom)
    {
        bins[mom].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            std::complex<FPType> acc = 0;
            for(uint site = 0; site < len; ++site)
            {
                std::complex<FPType> phasefac = std::exp(std::complex<FPType>(0.0, -static_cast<FPType>(TWOPI*mom*site)*invlen));
                GFRetVal pair = dowick.template onSector<UP>(site, tau, 0, 0) + dowick.template onSector<DOWN>((len-site)%len, tau, 0, 0);
                acc += phasefac * pair;
            }
            bins[mom][slice] = 0.5*configuration.phase * acc /* invlen*/;//not sure about that invlen here...
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the TRI Diagonal Green's function <c_ks(tau)^\dagger c_ks(0)>,
s = +-
*/
template <class Config, int sign>
class TRI_Greensfunction_kspace : public Network_Cache<Config, std::valarray<std::valarray<
//typename Config::Configuration::FPType
typename Config::SignType
> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;//G++ and G-- are real
typedef std::valarray<Function> ObservableType;///< The Greensfunction contains an array of functions
/**
The Constructor for the Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
TRI_Greensfunction_kspace(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType> (n, "TRI_Greensfunction_kspace"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the propagators used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config, int sign>
void TRI_Greensfunction_kspace<Config, sign>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register the diagonal (UP-sector) and off-diagonal (UP at r, DOWN at 0)
    // propagators that evaluate() reads for every site and time slice.
    typename Configuration::value_type origin_down(0, 0, DOWN);
    typename Configuration::value_type origin_up(0, 0, UP);// unused; kept from the original (its DOWN-UP counterpart is commented out there)
    for (unsigned int slice = 0; slice < functionpoints; ++slice)
    {
        const FPType tau = slice * delta_s;
        for(uint site = 0; site < len; ++site)
        {
            typename Configuration::value_type vup(site, tau, UP);
            typename Configuration::value_type vdown(site, tau, DOWN);// unused; kept from the original
            func.template onSector<UP>(site, tau, 0, 0);
            func(vup, origin_down);
        }
    }
}
template <class Config, int sign>
void TRI_Greensfunction_kspace<Config, sign>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // k-space Green's function in the time-reversal-invariant combination:
    // sum_r [ Re(e^{-i k r}) * G_diag(r) - sign * Im(e^{-i k r}) * G_off(r) ].
    ObservableType bins(len);
    FPType invlen = static_cast<FPType>(1.0)/len;
    typename Configuration::value_type origin_down(0, 0, DOWN);
    typename Configuration::value_type origin_up(0, 0, UP);// unused; kept from the original
    for (unsigned int mom = 0; mom < len; ++mom)
    {
        bins[mom].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            GFRetVal acc = 0;
            for(uint site = 0; site < len; ++site)
            {
                std::complex<FPType> phasefac = std::exp(std::complex<FPType>(0.0, -static_cast<FPType>(TWOPI*mom*site)*invlen));
                typename Configuration::value_type vup(site, tau, UP);
                typename Configuration::value_type vdown(site, tau, DOWN);// unused; kept from the original
                GFRetVal gdiag = dowick.template onSector<UP>(site, tau, 0, 0);//measure diagonal
                GFRetVal goff = dowick(vup, origin_down);//measure offdiagonal
                acc += real(phasefac)*gdiag - sign * imag(phasefac) * goff;
            }
            bins[mom][slice] = configuration.phase * acc;
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the Diagonal Green's function <c_ks(tau)^\dagger c_k(-s)(0)>,
We employ Time-reversal symmetry to enhance the measurement.
*/
template <class Config>
class OffDiagonalGreensfunction_kspace : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<std::complex<FPType> > Function;
typedef std::valarray<Function> ObservableType;///< The Greensfunction contains an array of functions
/**
The Constructor for the off-diagonal k-space Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
// BUGFIX: this observable was registered with the Network_Cache under the
// name "Greensfunction", colliding with the Greensfunction class (and with
// DiagonalGreensfunction_kspace, which had the same copy-paste name).
// It is now registered under its own name.
OffDiagonalGreensfunction_kspace(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType> (n, "OffDiagonalGreensfunction_kspace"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
// Registers with the DryRun object the propagators used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
};
template <class Config>
void OffDiagonalGreensfunction_kspace<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register both spin-offdiagonal propagators, <c_{r,up}^dagger c_{0,down}>
    // and <c_{r,down}^dagger c_{0,up}>, for every site and time slice.
    typename Configuration::value_type origin_down(0, 0, DOWN);
    typename Configuration::value_type origin_up(0, 0, UP);
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            typename Configuration::value_type vup(site, tau, UP);
            func(vup, origin_down);
            typename Configuration::value_type vdown(site, tau, DOWN);
            func(vdown, origin_up);
        }
    }
}
template <class Config>
void OffDiagonalGreensfunction_kspace<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Fourier transform of the spin-offdiagonal Green's function.  The
    // UP-DOWN term at r is averaged with the DOWN-UP term at the
    // time-reversal partner site (len - r) % len, hence the factor 0.5.
    ObservableType bins(len);
    FPType invlen = static_cast<FPType>(1.0)/len;
    typename Configuration::value_type origin_down(0, 0, DOWN);
    typename Configuration::value_type origin_up(0, 0, UP);
    const FPType fac = TWOPI * invlen;
    for (unsigned int mom = 0; mom < len; ++mom)
    {
        bins[mom].resize(functionpoints);
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            std::complex<FPType> acc = 0;
            for(uint site = 0; site < len; ++site)
            {
                std::complex<FPType> pref = std::exp(std::complex<FPType>(0.0, -fac * (mom*site)));
                typename Configuration::value_type vup(site, tau, UP);
                typename Configuration::value_type vdown((len - site)%len, tau, DOWN);
                GFRetVal both = dowick(vup, origin_down) + dowick(vdown, origin_up);
                acc += pref * both;
            }
            bins[mom][slice] = 0.5*configuration.phase * acc;//normalization not necessary since the 1/N factor is already in the definition of G_0
        }
    }
    this->add_bin(bins);
}
/**
A class for measuring the time dependency of the Greensfunctions, thus <c_0(0)^\dagger c_r(s)>.
This Greensfunction only has sense for imaginary time measurements, because we use David's smoothing trick.
WE MEASURE THE DOWN-SECTOR!
*/
template <class Config>
class SmoothImaginaryGreensfunction : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< The Greensfunction contains an array of functions
/**
The Constructor for the Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
SmoothImaginaryGreensfunction(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SmoothImaginaryGreensfunction"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s), beta(params.beta)
{
}
// Registers with the DryRun object the propagators used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
const double beta;///< inverse temperature; also the length of the imaginary-time axis averaged over
};
template <class Config>
void SmoothImaginaryGreensfunction<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register every pair of imaginary times touched by the smoothing
    // average in evaluate(): for displacement j*delta_s the estimator is
    // averaged over all start times i*delta_s.  DOWN sector only (the
    // UP-sector registration is commented out in the original).
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            for (unsigned int i = 0; i < trunc(beta/delta_s); ++i)
            {
                func.template onSector<DOWN>(0, static_cast<FPType>(i+slice)*delta_s, site, i * delta_s);
            }
        }
    }
}
template <class Config>
void SmoothImaginaryGreensfunction<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // G(k, j*delta_s) via David's smoothing trick: the estimator is averaged
    // over all translation start times i*delta_s in [0, beta),
    //   G(k, s) = (1/beta) * sum_i G(0, i*delta_s + s; k, i*delta_s) * delta_s.
    // DOWN sector only (the UP-sector term is commented out in the original).
    ObservableType func(len);
    // Hoisted: the original re-evaluated trunc(beta/delta_s) in the loop
    // condition of the innermost loop on every iteration.
    const unsigned int nslices = static_cast<unsigned int>(trunc(beta/delta_s));
    for (unsigned int k = 0; k < len; ++k)
    {
        func[k].resize(functionpoints);
        for (unsigned int j = 0; j < functionpoints; ++j)
        {
            GFRetVal sum = 0;
            for (unsigned int i = 0; i < nslices; ++i)
            {
                sum += dowick.template onSector<DOWN>(0, static_cast<FPType>(i+j)*delta_s, k, i * delta_s) * delta_s;
            }
            //add to measurement: delta_s-weighted time average normalized by beta
            func[k][j] = configuration.phase * sum/beta;
        }
    }
    this->add_bin(func);
    return;
}
/**
A class for measuring the time dependency of the Greensfunctions, thus <c_0(0)^\dagger c_r(s)>.
This Greensfunction only has sense for imaginary time measurements, because we use David's smoothing trick.
Here we average over both Spin sectors
*/
template <class Config>
class SmoothImaginaryGreensfunction_averaged : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///< The Greensfunction contains an array of functions
/**
The Constructor for the Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
SmoothImaginaryGreensfunction_averaged(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SmoothImaginaryGreensfunction_averaged"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s), beta(params.beta)
{
}
// Registers with the DryRun object the propagators used by evaluate().
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of time slices measured (reference into Parameters)
const double delta_s;///< spacing between consecutive time slices
const double beta;///< inverse temperature; also the length of the imaginary-time axis averaged over
};
template <class Config>
void SmoothImaginaryGreensfunction_averaged<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register, in BOTH spin sectors, every pair of imaginary times touched
    // by the smoothing average in evaluate().
    for (unsigned int site = 0; site < len; ++site)
    {
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            for (unsigned int i = 0; i < trunc(beta/delta_s); ++i)
            {
                func.template onSector<DOWN>(0, static_cast<FPType>(i+slice)*delta_s, site, i * delta_s);
                func.template onSector<UP>(0, static_cast<FPType>(i+slice)*delta_s, site, i * delta_s);
            }
        }
    }
}
template <class Config>
void SmoothImaginaryGreensfunction_averaged<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Same smoothing trick as SmoothImaginaryGreensfunction, but averaged
    // over both spin sectors (hence the final division by 2).
    ObservableType func(len);
    // Hoisted: the original re-evaluated trunc(beta/delta_s) in the loop
    // condition of the innermost loop on every iteration.
    const unsigned int nslices = static_cast<unsigned int>(trunc(beta/delta_s));
    for (unsigned int k = 0; k < len; ++k)
    {
        func[k].resize(functionpoints);
        for (unsigned int j = 0; j < functionpoints; ++j)
        {
            GFRetVal sum = 0;
            for (unsigned int i = 0; i < nslices; ++i)
            {
                sum += (dowick.template onSector<DOWN>(0, static_cast<FPType>(i+j)*delta_s, k, i * delta_s) + dowick.template onSector<UP>(0, static_cast<FPType>(i+j)*delta_s, k, i * delta_s)) * delta_s;
            }
            //add to measurement: time average over beta, spin average over 2 sectors
            func[k][j] = configuration.phase * sum/beta/2.0;
        }
    }
    this->add_bin(func);
    return;
}
/**
A class for measuring the Matsubarafrequency Greensfunction thus <c_0(0)^\dagger c_r(i omega)>,
WE MEASURE THE UP-SECTOR!
*/
template <class Config, class Greensfunction>
class MatsubaraFrequencyGreensfunction : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<std::complex<FPType> > Function;
typedef std::valarray<Function> ObservableType;///< The Matsubarafrequency dependent Greensfunction contains an array of functions
/**
The Constructor for the Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
MatsubaraFrequencyGreensfunction(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "MatsubaraFrequencyGreensfunction"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s), beta(params.beta)
{
}
// Intentionally a no-op here: evaluate() works on the configuration's
// matrix directly and requests no Wick contractions.
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&) MTL_CONST_FUNCTION;
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of Matsubara frequencies measured (reference into Parameters)
const FPType delta_s;///< time-slice spacing (unused by evaluate(); kept for interface parity)
const FPType beta;///< inverse temperature; sets the Matsubara frequencies (2n+1)*pi/beta
};
template <class Config, class Greensfunction>
void MatsubaraFrequencyGreensfunction<Config, Greensfunction>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>&)
{
// Intentionally empty: evaluate() reads configuration.matcont and the free
// Green's function directly, so there is nothing to register for a dry run.
}
template <class Config, class Greensfunction>
void MatsubaraFrequencyGreensfunction<Config, Greensfunction>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>&)
{
// Measures G(i omega_n) directly from the configuration's matrix via
//   G(i omega_n) = phase * g0(i omega_n) * (1 - g0(i omega_n) * M(i omega_n) / beta),
// where M(i omega_n) = sum_{r,s} e^{i omega_n (tau_r - tau_s)} matcont(r, s, UP, UP).
// The DoWick argument is deliberately unused.
ObservableType func(len);
typename Greensfunction::Vertex v;
v.spin = UP;// UP sector is measured, see the class comment
for (unsigned int k = 0; k < len; ++k)
{
func[k].resize(functionpoints);
// NOTE(review): nothing in the loops below depends on k, so every func[k]
// receives identical data -- confirm whether a k-dependence was intended.
for (unsigned int r = 0; r < configuration.size(); ++r)
for (unsigned int s = 0; s < configuration.size(); ++s)
{
const FPType deltars = configuration[r].tau - configuration[s].tau;
// inc starts at e^{i pi deltars / beta}; multiplying by fac = inc^2 per
// frequency step walks through the odd (fermionic) Matsubara frequencies.
std::complex<FPType> inc = std::exp(std::complex<FPType>(0.0, deltars * M_PI / beta));
const std::complex<FPType> fac = inc * inc;
inc *= configuration.matcont(r, s, UP, UP);
for (unsigned int n = 0; n < functionpoints; ++n)
{
func[k][n] += inc;
inc *= fac;
}
}
for (unsigned int n = 0; n < functionpoints; ++n)
{
FPType omegan = M_PI/beta*static_cast<FPType>(2*n+1);// omega_n = (2n+1) pi / beta
std::complex<FPType> gomegan = Greensfunction::gomega(omegan, v, v);
func[k][n] = configuration.phase * gomegan * (static_cast<FPType>(1.0) - gomegan*func[k][n]/beta);
}
}
this->add_bin(func);
return;
}
/**
A class for measuring the averaged Matsubarafrequency Greensfunction thus sum_\sigma <c_0(0)^\dagger c_r(i omega)>,
*/
template <class Config, class Greensfunction>
class MatsubaraFrequencyGreensfunction_averaged : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<std::complex<FPType> > Function;
typedef std::valarray<Function> ObservableType;///< The Matsubarafrequency dependent Greensfunction contains an array of functions
/**
The Constructor for the Greensfunction
@param n the communication object handed to the Network_Cache base
@param params simulation parameters; N and functionpoints are held by reference, so params must outlive this object
*/
MatsubaraFrequencyGreensfunction_averaged(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "MatsubaraFrequencyGreensfunction_averaged"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s), beta(params.beta)
{
}
// Intentionally a no-op here: evaluate() works on the configuration's
// matrix directly and requests no Wick contractions.
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&) MTL_CONST_FUNCTION;
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< number of lattice sites (reference into Parameters)
const uint32_t& functionpoints;///< number of Matsubara frequencies measured (reference into Parameters)
const FPType delta_s;///< time-slice spacing (kept for interface parity with the other observables)
const FPType beta;///< inverse temperature; sets the Matsubara frequencies (2n+1)*pi/beta
};
template <class Config, class Greensfunction>
void MatsubaraFrequencyGreensfunction_averaged<Config, Greensfunction>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>&)
{
// Intentionally empty: evaluate() accesses the configuration matrix through
// configuration.matcont.mat directly and registers no Wick contractions.
}
/**
Measures the spin-summed G(i omega_n): the bare functions g_up/g_down are
tabulated at every Matsubara frequency, the vertex double sum is accumulated
with iteratively generated phases e^{i omega_n (tau_r - tau_s)}, and the
result is phase * (g_up + g_down - acc/beta).
@param configuration the current vertex configuration (tau values, matcont.mat and phase)
*/
template <class Config, class Greensfunction>
void MatsubaraFrequencyGreensfunction_averaged<Config, Greensfunction>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>&)
{
    ObservableType func(len);
    typename Greensfunction::Vertex vup;
    typename Greensfunction::Vertex vdown;
    vup.spin = UP;
    vdown.spin = DOWN;
    // Bare-Greensfunction tables. std::valarray replaces the previous
    // runtime-sized stack arrays (VLAs are a compiler extension, not standard
    // C++), and the tables are hoisted out of the k loop since they do not
    // depend on k.
    Function gup(functionpoints);
    Function gdown(functionpoints);
    for (unsigned int n = 0; n < functionpoints; ++n)
    {
        FPType omegan = M_PI/beta*static_cast<FPType>(2*n+1);
        gup[n] = Greensfunction::gomega(omegan, vup, vup);
        gdown[n] = Greensfunction::gomega(omegan, vdown, vdown);
    }
    // The vertex double sum is likewise independent of the site index k, so it
    // is accumulated once and replicated below — same values as before, minus
    // the len-fold redundant recomputation.
    Function acc(functionpoints);
    for (unsigned int r = 0; r < configuration.size(); ++r)
        for (unsigned int s = 0; s < configuration.size(); ++s)
        {
            const FPType deltars = configuration[r].tau - configuration[s].tau;
            std::complex<FPType> inc = std::exp(std::complex<FPType>(0.0, deltars * M_PI / beta));
            const std::complex<FPType> fac = inc * inc;
            for (unsigned int n = 0; n < functionpoints; ++n)
            {
                // even matrix indices belong to UP, odd ones to DOWN
                acc[n] += (gup[n]*gup[n]*configuration.matcont.mat(2*r, 2*s) + gdown[n]*gdown[n]*configuration.matcont.mat(2*r+1, 2*s+1))*inc;
                inc *= fac;
            }
        }
    for (unsigned int n = 0; n < functionpoints; ++n)
        acc[n] = configuration.phase * (gup[n] + gdown[n] - acc[n]/beta);
    for (unsigned int k = 0; k < len; ++k)
    {
        func[k].resize(functionpoints);// resize first: pre-C++11 valarray assignment requires equal sizes
        func[k] = acc;
    }
    this->add_bin(func);
    return;
}
/**
A class for measuring the derivative with respect to omega of the Matsubarafrequency Greensfunction thus d/domega <c_0(0)^\dagger c_r(i omega)>,
WE MEASURE THE UP-SECTOR!
*/
template <class Config, class Greensfunction>
class MatsubaraFrequencyGreensfunctionDerivative : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;///< the vertex configuration type
typedef typename Configuration::FPType FPType;///< the floating point type of the simulation
typedef typename Config::SignType GFRetVal;///< the type of the configuration's phase/sign
typedef std::valarray<std::complex<FPType> > Function;///< one frequency-resolved complex function
typedef std::valarray<Function> ObservableType;///< The Matsubarafrequency dependent Greensfunction contains an array of functions
/**
The Constructor for the Greensfunction
@param n the communication object used by Network_Cache
@param params simulation parameters; N, functionpoints, delta_s and beta are read. len and functionpoints are held by reference, so params must outlive this observable.
*/
MatsubaraFrequencyGreensfunctionDerivative(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "MatsubaraFrequencyGreensfunctionDerivative"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s), beta(params.beta)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&) MTL_CONST_FUNCTION;///< dry run: a no-op, see the definition below
/**
Here we evaluate for a given order the values of all Greensfunctions
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< bound to params.N; outer size of the measured valarray
const uint32_t& functionpoints;///< bound to params.functionpoints; number of Matsubara frequencies
const FPType delta_s;///< params.delta_s (unused in this class's visible code)
const FPType beta;///< inverse temperature, params.beta
};
template <class Config, class Greensfunction>
void MatsubaraFrequencyGreensfunctionDerivative<Config, Greensfunction>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>&)
{
// Intentionally empty: evaluate() reads the vertex matrix via
// configuration.matcont.up and needs no Wick contractions registered.
}
template <class Config, class Greensfunction>
void MatsubaraFrequencyGreensfunctionDerivative<Config, Greensfunction>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>&)
{
// Measures d/domega G(i omega_n) for the UP sector. For every vertex pair
// (r, s) the phases e^{i omega_n (tau_r - tau_s)} are generated iteratively:
// inc1 starts at the n = 0 phase and each frequency step multiplies by
// fac = e^{2 i pi (tau_r - tau_s)/beta}; inc2 carries the additional factor
// i (tau_r - tau_s) produced by differentiating the exponential w.r.t. omega.
// The derivative of the bare Greensfunction itself is taken numerically via a
// symmetric difference quotient.
// NOTE(review): nothing inside the k loop appears to depend on k, so all
// func[k] receive identical data - presumably intentional for this
// site-independent model; confirm.
ObservableType func(len);
for (unsigned int k = 0; k < len; ++k)
{
func[k].resize(functionpoints);
Function temp(functionpoints);
/* for (unsigned int n = 0; n < functionpoints; ++n)
{
const FPType omegan = M_PI/beta*static_cast<FPType>(2*n+1);
const std::complex<FPType> gomegan = CRAPPY_Make_compile_Helper<Greensfunction, Configuration>::measure(omegan, beta, v, w, ed, mu);
for(unsigned int r = 0; r < configuration.size(); ++r)
{
for(unsigned int s = 0; s < configuration.size(); ++s)
func[k][n] += std::exp(std::complex<FPType>(0.0, omegan * (configuration[r].tau - configuration[s].tau))) * configuration.up.inverse(r,s);
}
func[k][n] /= beta;
func[k][n] *= gomegan;
func[k][n] = configuration.phase * gomegan * (1.0 - func[k][n]);
}*/
for (unsigned int r = 0; r < configuration.size(); ++r)
for (unsigned int s = 0; s < configuration.size(); ++s)
{
const FPType deltars = configuration[r].tau - configuration[s].tau;
std::complex<FPType> inc1 = std::exp(std::complex<FPType>(0.0, deltars * M_PI / beta));
const std::complex<FPType> fac = inc1 * inc1;
inc1 *= configuration.matcont.up(r, s);
std::complex<FPType> inc2 = inc1 * std::complex<FPType>(0.0, deltars);
for (unsigned int n = 0; n < functionpoints; ++n)
{
func[k][n] += inc1;//func[k][n] is the sum, that is the same as in the plain Matsubara Greensfunction
temp[n] += inc2;//temp[n] is the same sum with the extra i*deltars from the omega-derivative
inc1 *= fac;
inc2 *= fac;
}
}
for (unsigned int n = 0; n < functionpoints; ++n)
{
FPType omegan = M_PI/beta*static_cast<FPType>(2*n+1);
const FPType tiny = 0.00000001;//same as in the greensfunctions
const FPType h = pow(tiny, 1.0/3.0) * 0.707 * omegan;//an optimal choice of h for the symmetric derivative derived for a function that behaves as 1/x... see NR 5.7
// NOTE(review): gomega is called here with (omega, SPIN) whereas the plain
// Matsubara observable calls it with (omega, vertex, vertex) - presumably a
// spin-diagonal overload; confirm.
std::complex<FPType> gomegan = Greensfunction::gomega(omegan, UP);
std::complex<FPType> gomeganplush = Greensfunction::gomega(omegan + h, UP);
std::complex<FPType> gomeganminush = Greensfunction::gomega(omegan - h, UP);
std::complex<FPType> derivative = (gomeganplush - gomeganminush)/(static_cast<FPType>(2.0)*h);
// product rule: d/domega [g (1 - g S/beta)] = g' - 2 g g' S/beta - g^2 S'/beta
func[k][n] = configuration.phase * (derivative - static_cast<FPType>(2.0)/beta * gomegan * derivative * func[k][n] - static_cast<FPType>(1.0)/beta* gomegan * gomegan * temp[n]);
}
}
this->add_bin(func);
return;
}
/**
A class for measuring the time dependency of the Density-Density-StructureFactor
*/
template <class Config>
class DensityDensityStructureFactor : public Network_Cache<Config, std::valarray<std::valarray<std::complex<typename Config::Configuration::FPType> > > >
{
public:
typedef typename Config::Configuration Configuration;///< the vertex configuration type
typedef typename Configuration::FPType FPType;///< the floating point type of the simulation
typedef typename Config::SignType GFRetVal;///< the type of the configuration's phase/sign
typedef std::valarray<std::complex<FPType> > Function;//the density-density structure factor is determined by a Fourier-transform. Therefore it can be a complex type
typedef std::valarray<Function> ObservableType;///< the density-density-structure-factor is a k-space resolved time-dependent observables
/**
The Constructor for the DensityDensityStructureFactor
@param n the communication object used by Network_Cache
@param params simulation parameters; N, functionpoints and delta_s are read. len and functionpoints are held by reference, so params must outlive this observable.
*/
DensityDensityStructureFactor(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "DensityDensityStructureFactor"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);///< registers every density-density contraction that evaluate() requests
/**
This determines the DensityDensityStructureFactor for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< bound to params.N; number of sites and of k-points
const uint32_t& functionpoints;///< bound to params.functionpoints; number of time slices
const double delta_s;///< params.delta_s, the time-slice width. NOTE(review): declared double while sibling observables use FPType - harmless if FPType is double; confirm
};
/**
Dry run: announces every density-density contraction that evaluate() will
request — all ordered site pairs, every time slice, and the four spin
combinations. The call order matches the nesting used in evaluate().
@param func the DryRun registration functor
*/
template <class Config>
void DensityDensityStructureFactor<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    for (unsigned int site = 0; site < len; ++site)
        for (unsigned int other = 0; other < len; ++other)
            for (unsigned int slice = 0; slice < functionpoints; ++slice)
            {
                const FPType tau = slice * delta_s;
                densitydensityCorrelation_dry<Configuration>(func, other, tau, UP, site, tau, UP);
                densitydensityCorrelation_dry<Configuration>(func, other, tau, UP, site, tau, DOWN);
                densitydensityCorrelation_dry<Configuration>(func, other, tau, DOWN, site, tau, UP);
                densitydensityCorrelation_dry<Configuration>(func, other, tau, DOWN, site, tau, DOWN);
            }
    return;
}
template <class Config>
void DensityDensityStructureFactor<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
// For every momentum index k and time slice j: Fourier-transform the
// equal-time density-density correlations over the site separation (d - a),
// summing all four spin combinations, then normalise by the number of sites.
ObservableType func(len);
for (unsigned int k = 0; k < len; ++k)
{
func[k].resize(functionpoints);
for (unsigned int j = 0; j < functionpoints; ++j)
{
const FPType s = j * delta_s;
std::complex<FPType> t1 = 0;
FPType lenf = static_cast<FPType>(len);
// a and d are signed so that the separation (d - a) below can be negative
for (int a = 0; a < static_cast<int>(len); ++a)
for (int d = 0; d < static_cast<int>(len); ++d)
{
// phase factor e^{i 2 pi k (d - a)/len} of the spatial Fourier transform
std::complex<FPType> pref = std::exp(std::complex<FPType>(0.0, static_cast<FPType>(TWOPI*k)/lenf*(d-a)) );
t1 += pref * (densitydensityCorrelation<Configuration>(dowick, a, s, UP, d, s, UP)
+ densitydensityCorrelation<Configuration>(dowick, a, s, UP, d, s, DOWN)
+ densitydensityCorrelation<Configuration>(dowick, a, s, DOWN, d, s, UP)
+ densitydensityCorrelation<Configuration>(dowick, a, s, DOWN, d, s, DOWN));
}
//add to measurement
func[k][j] = t1 * configuration.phase/lenf;
}
}
this->add_bin(func);
return;
}
/**
A class for measuring the time dependency of the DoubleOccupancy
*/
template <class Config>
class DoubleOccupancy : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;///< the vertex configuration type
typedef typename Configuration::FPType FPType;///< the floating point type of the simulation
typedef typename Config::SignType GFRetVal;///< the type of the configuration's phase/sign
typedef std::valarray<GFRetVal> Function;///< one time-resolved function
typedef std::valarray<Function> ObservableType;///< DoubleOccupancy is a spatially resolved time-dependent observable
/**
The Constructor for the DoubleOccupancy
@param n the communication object used by Network_Cache
@param params simulation parameters; N, functionpoints and delta_s are read. len and functionpoints are held by reference, so params must outlive this observable.
*/
DoubleOccupancy(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config,ObservableType>(n, "DoubleOccupancy"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);///< registers the <n_down n_up> contraction at every site and slice
/**
this determines the DoubleOccupancy for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< bound to params.N; number of sites
const uint32_t& functionpoints;///< bound to params.functionpoints; number of time slices
const double delta_s;///< params.delta_s, the time-slice width
};
/**
Dry run: registers the equal-time <n_down n_up> contraction needed at every
site and every time slice of the measurement grid.
@param func the DryRun registration functor
*/
template <class Config>
void DoubleOccupancy<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    for (unsigned int site = 0; site < len; ++site)
        for (unsigned int slice = 0; slice < functionpoints; ++slice)
        {
            const FPType tau = slice * delta_s;
            densitydensityCorrelation_dry<Configuration>(func, site, tau, DOWN, site, tau, UP);
        }
    return;
}
/**
Measures the double occupancy <n_down(k, s) n_up(k, s)> at every site k and
time slice s = j * delta_s, weighted with the configuration's phase.
NOTE: the template parameter is named Config here, matching the class
template and every sibling definition; the previous name "Configuration" was
hidden by the class-scope typedef of the same name (legal, but confusing).
@param configuration the current vertex configuration (supplies the phase)
@param dowick the Wick-contraction evaluator
*/
template <class Config>
void DoubleOccupancy<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType func(len);
    for (unsigned int k = 0; k < len; ++k)
    {
        func[k].resize(functionpoints);
        for (unsigned int j = 0; j < functionpoints; ++j)
        {
            const FPType s = j * delta_s;
            GFRetVal t1 = densitydensityCorrelation<Configuration>(dowick, k, s, DOWN, k, s, UP);
            //add to measurement
            func[k][j] = t1 * configuration.phase;
        }
    }
    this->add_bin(func);
    return;
}
/**
A class for measuring the Conductance in an imaginary time simulation
*/
#include "conductance_grid.h"
template <class Config, class Greensfunction>
class Conductance : public Network_Cache<Config, typename Config::SignType>
{
public:
typedef typename Config::Configuration Configuration;///< the vertex configuration type
typedef typename Configuration::FPType FPType;///< the floating point type of the simulation
typedef typename Config::SignType GFRetVal;///< the type of the configuration's phase/sign
typedef std::valarray<GFRetVal> Function;///< (unused in the visible code of this class)
typedef GFRetVal ObservableType;///< a single scalar is measured per bin
/**
The Constructor for the Conductance observable
@param n the communication object used by Network_Cache
@param params simulation parameters; N, functionpoints, delta_s, beta, V and W are read
*/
Conductance(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "Conductance"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s), beta(params.beta), alpha_max(sizeof(grid)/sizeof(Point2)), gamma(params.V*params.V*M_PI/params.W)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&) MTL_CONST_FUNCTION;///< dry run: a no-op, see the definition below
/**
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< bound to params.N (unused in the visible code of this class)
const uint32_t& functionpoints;///< bound to params.functionpoints (unused in the visible code of this class)
const double delta_s;///< params.delta_s (unused in the visible code of this class)
const FPType beta;///< inverse temperature, params.beta
const unsigned int alpha_max;///< number of quadrature points in the grid from conductance_grid.h
const FPType gamma;///< pi V^2 / W; presumably the level broadening of the model - confirm
};
// Intentionally empty: evaluate() reads the vertex matrix through
// configuration.matcont.up directly, so no Wick contractions are registered.
// The first template parameter is named Config to match the class template;
// the previous name "Configuration" was hidden by the class-scope typedef.
template <class Config, class Greensfunction>
void Conductance<Config, Greensfunction>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    return;
}
/**
Measures the conductance estimator: for every quadrature frequency
omega_alpha (grid from conductance_grid.h) the omega-derivative of the full
Greensfunction is assembled from a symmetric difference quotient of the bare
function plus the vertex corrections read from configuration.matcont.up, and
the weighted imaginary parts are summed.
The first template parameter is named Config to match the class template; the
previous name "Configuration" was hidden by the class-scope typedef anyway.
@param configuration the current vertex configuration (tau values, matcont.up and phase)
@param dowick unused; the vertex matrix is accessed directly
*/
template <class Config, class Greensfunction>
void Conductance<Config, Greensfunction>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType ret = 0.0;
    for (unsigned int alpha = 0; alpha < alpha_max; ++alpha)
    {
        const FPType omega_alpha = 1.0/(grid[alpha].gp*beta);
        const std::complex<FPType> gomega = Greensfunction::gomega(omega_alpha, UP);
        const FPType tiny = 0.00000001;//same as in the greensfunctions
        const FPType h = pow(tiny, 1.0/3.0) * 0.707 * omega_alpha;//an optimal choice of h for the symmetric derivative, derived for a function that behaves as 1/x... see NR 5.7. the choice of x_c= 0.707*x is reasonable as we always evaluate the function for x > 0
        std::complex<FPType> gomegaplush = Greensfunction::gomega(omega_alpha + h, UP);
        std::complex<FPType> gomegaminush = Greensfunction::gomega(omega_alpha - h, UP);
        std::complex<FPType> derivative = (gomegaplush - gomegaminush)/(static_cast<FPType>(2.0)*h);
        // sum1: sum_{r,s} e^{i omega (tau_r - tau_s)} M(r,s);
        // sum2: same with the extra i (tau_r - tau_s) from the omega-derivative
        std::complex<FPType> sum1 = 0.0;
        std::complex<FPType> sum2 = 0.0;
        for (unsigned int r = 0; r < configuration.size(); ++r)
        {
            for (unsigned int s = 0; s < configuration.size(); ++s)
            {
                std::complex<FPType> expfac = std::exp(std::complex<FPType>(0.0, omega_alpha * (configuration[r].tau - configuration[s].tau)));
                GFRetVal mat = configuration.matcont.up(r,s);
                sum1 += mat*expfac;
                sum2 += mat*expfac * std::complex<FPType>(0.0, (configuration[r].tau - configuration[s].tau));
            }
        }
        // product rule: d/domega [g (1 - g S/beta)] = g' - 2 (g/beta) g' S - (g/beta) g S'
        std::complex<FPType> factor = gomega / beta;
        std::complex<FPType> dga = derivative - static_cast<FPType>(2.0) * factor * derivative * sum1 - factor *gomega * sum2;
        ret += imag(dga) * grid[alpha].weight;
    }
    this->add_bin(-configuration.phase * 2.0*gamma*ret/beta);
}
/**
A class for measuring the time dependency of the correlated part of the Spin-Spin-Correlation in the Y direction
*/
template <class Config>
class SpinSpinCorrelatedPart_Y : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;///< the vertex configuration type
typedef typename Configuration::FPType FPType;///< the floating point type of the simulation
typedef typename Config::Comm Net;///< the communication/network type
typedef typename Config::SignType GFRetVal;///< the type of the configuration's phase/sign
typedef std::valarray<GFRetVal> Function;///< one time-resolved function
typedef std::valarray<Function> ObservableType;///< Spin-Spin-Correlations are spatially resolved time-dependent observables
/**
The Constructor for the correlated part Spin-Spin-Correlation
@param n the communication object used by Network_Cache
@param params simulation parameters; N, functionpoints and delta_s are read. len and functionpoints are held by reference, so params must outlive this observable.
*/
inline SpinSpinCorrelatedPart_Y(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SpinSpinCorrelatedPart_Y"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);///< registers all contractions used by evaluate()
/**
this determines the Spin-Spin-Correlation for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< bound to params.N; number of sites
const uint32_t& functionpoints;///< bound to params.functionpoints; number of time slices
const double delta_s;///< params.delta_s, the time-slice width
};
/**
Dry run: registers, for every time slice and site, all Wick contractions that
evaluate() requests — the four two-particle Greensfunctions and the four
equal-time spin-flip contractions of the disconnected part. Call order
matches evaluate().
@param func the DryRun registration functor
*/
template <class Config>
void SpinSpinCorrelatedPart_Y<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    typedef typename Configuration::value_type Vertex;
    for (unsigned int tslice = 0; tslice < functionpoints; ++tslice)
    {
        const FPType tau = tslice * delta_s;
        for (unsigned int isite = 0; isite < len; ++isite)
        {
            const Vertex a(isite, tau, UP);
            const Vertex b(isite, tau, DOWN);
            const Vertex c(0, 0, UP);
            const Vertex d(0, 0, DOWN);
            genericTwoParticleGreensfunction_dry<Configuration>(func, a, b, c, d);
            genericTwoParticleGreensfunction_dry<Configuration>(func, b, a, c, d);
            genericTwoParticleGreensfunction_dry<Configuration>(func, a, b, d, c);
            genericTwoParticleGreensfunction_dry<Configuration>(func, b, a, d, c);
            func(a, b);
            func(b, a);
            func(c, d);
            func(d, c);
        }
    }
    return;
}
template <class Config>
void SpinSpinCorrelatedPart_Y<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
typedef typename Configuration::value_type Vertex;
// <S^y(k, s) S^y(0, 0)>: the four two-particle terms carry the relative signs
// of (c^dag_up c_down - c^dag_down c_up) at each end; the product of the two
// single-particle expectation values removes the disconnected part (the
// inline comment below notes the extra sign coming from i^2 in S^y).
ObservableType func(len);
for (unsigned int k = 0; k < len; ++k)
{
func[k].resize(functionpoints);
for (unsigned int j = 0; j < functionpoints; ++j)
{
const FPType s = j * delta_s;
GFRetVal retval = -(genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, UP), Vertex(k, s, DOWN), Vertex(0, 0, UP), Vertex(0, 0, DOWN))
-genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, DOWN), Vertex(k, s, UP), Vertex(0, 0, UP), Vertex(0, 0, DOWN))
-genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, UP), Vertex(k, s, DOWN), Vertex(0, 0, DOWN), Vertex(0, 0, UP))
+genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, DOWN), Vertex(k, s, UP), Vertex(0, 0, DOWN), Vertex(0, 0, UP)))
+/*subtract correlated part, additional minus sign due to i^2*/
(dowick(Vertex(k, s, UP), Vertex(k, s, DOWN))-
dowick(Vertex(k, s, DOWN), Vertex(k, s, UP)))*
(dowick(Vertex(0, 0, UP), Vertex(0, 0, DOWN))-
dowick(Vertex(0, 0, DOWN), Vertex(0, 0, UP)));
//add to measurement; the factor 1/4 presumably stems from the two 1/2 prefactors of the spin operators - confirm
func[k][j] = retval * configuration.phase/ static_cast<FPType>(4.0);
}
}
this->add_bin(func);
return;
}
/**
A class for measuring the time dependency of the correlated part of the Spin-Spin-Correlation in the X direction
*/
template <class Config>
class SpinSpinCorrelatedPart_X : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;///< the vertex configuration type
typedef typename Configuration::FPType FPType;///< the floating point type of the simulation
typedef typename Config::Comm Net;///< the communication/network type
typedef typename Config::SignType GFRetVal;///< the type of the configuration's phase/sign
typedef std::valarray<GFRetVal> Function;///< one time-resolved function
typedef std::valarray<Function> ObservableType;///< Spin-Spin-Correlations are spatially resolved time-dependent observables
/**
The Constructor for the correlated part Spin-Spin-Correlation
@param n the communication object used by Network_Cache
@param params simulation parameters; N, functionpoints and delta_s are read. len and functionpoints are held by reference, so params must outlive this observable.
*/
inline SpinSpinCorrelatedPart_X(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SpinSpinCorrelatedPart_X"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);///< registers all contractions evaluate() may request
/**
this determines the Spin-Spin-Correlation for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< bound to params.N; number of sites
const uint32_t& functionpoints;///< bound to params.functionpoints; number of time slices
const double delta_s;///< params.delta_s, the time-slice width
};
/**
Dry run: announces every contraction evaluate() may request — the four
two-particle Greensfunctions and the four equal-time spin-flip contractions —
at every time slice and site, in the same order as the original registration.
@param func the DryRun registration functor
*/
template <class Config>
void SpinSpinCorrelatedPart_X<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    typedef typename Configuration::value_type Vertex;
    for (unsigned int slice = 0; slice < functionpoints; ++slice)
    {
        const FPType tau = slice * delta_s;
        for (unsigned int site = 0; site < len; ++site)
        {
            const Vertex up_t(site, tau, UP);
            const Vertex down_t(site, tau, DOWN);
            const Vertex up_0(0, 0, UP);
            const Vertex down_0(0, 0, DOWN);
            genericTwoParticleGreensfunction_dry<Configuration>(func, up_t, down_t, up_0, down_0);
            genericTwoParticleGreensfunction_dry<Configuration>(func, down_t, up_t, up_0, down_0);
            genericTwoParticleGreensfunction_dry<Configuration>(func, up_t, down_t, down_0, up_0);
            genericTwoParticleGreensfunction_dry<Configuration>(func, down_t, up_t, down_0, up_0);
            func(up_t, down_t);
            func(down_t, up_t);
            func(up_0, down_0);
            func(down_0, up_0);
        }
    }
    return;
}
template <class Config>
void SpinSpinCorrelatedPart_X<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
typedef typename Configuration::value_type Vertex;
// <S^x(k, s) S^x(0, 0)> using a simplified, spin-diagonal measurement: only
// the two products of single-particle propagators below survive; the general
// four-term two-particle expression and the disconnected-part subtraction are
// kept in the commented-out alternatives for reference.
ObservableType func(len);
for (unsigned int k = 0; k < len; ++k)
{
func[k].resize(functionpoints);
for (unsigned int j = 0; j < functionpoints; ++j)
{
const FPType s = j * delta_s;
/* std::cout<<genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, UP), Vertex(k, s, DOWN), Vertex(0, 0, UP), Vertex(0, 0, DOWN))
<<" "<<genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, DOWN), Vertex(k, s, UP), Vertex(0, 0, UP), Vertex(0, 0, DOWN))<<" "
<<genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, UP), Vertex(k, s, DOWN), Vertex(0, 0, DOWN), Vertex(0, 0, UP))<<" "
<<genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, DOWN), Vertex(k, s, UP), Vertex(0, 0, DOWN), Vertex(0, 0, UP))<<std::endl;*/
//simplified measurement in case of spin-diagonal measurement.
// NOTE(review): the contractions below use site k at both ends while the
// commented-out general expression uses site 0 for the second operator;
// also the normalisation 1/4 of the four-term form is kept although only
// two terms remain - both presumably intentional for this model; confirm.
GFRetVal retval = dowick(Vertex(k,s,UP), Vertex(k,0,UP)) * dowick(Vertex(k,0,DOWN), Vertex(k, s, DOWN)) +
dowick(Vertex(k, s, DOWN), Vertex(k, 0, DOWN)) * dowick(Vertex(k, 0, UP), Vertex(k, s, UP))
/*(
genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, UP), Vertex(k, s, DOWN), Vertex(0, 0, UP), Vertex(0, 0, DOWN))
+genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, DOWN), Vertex(k, s, UP), Vertex(0, 0, UP), Vertex(0, 0, DOWN))
+genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, UP), Vertex(k, s, DOWN), Vertex(0, 0, DOWN), Vertex(0, 0, UP))
+genericTwoParticleGreensfunction<Configuration>(dowick, Vertex(k, s, DOWN), Vertex(k, s, UP), Vertex(0, 0, DOWN), Vertex(0, 0, UP))
)*/
//subtract correlated part
/* -(dowick(Vertex(k, s, UP), Vertex(k, s, DOWN))+
dowick(Vertex(k, s, DOWN), Vertex(k, s, UP)))*
(dowick(Vertex(0, 0, UP), Vertex(0, 0, DOWN))+
dowick(Vertex(0, 0, DOWN), Vertex(0, 0, UP)))*/;
//add to measurement
// std::cout<<retval<<std::endl;
func[k][j] = retval * configuration.phase/ static_cast<FPType>(4.0);
}
}
this->add_bin(func);
return;
}
/**
A class for measuring the imaginary-time dependency of the Z-component of the Spin-Spin-Correlation
*/
template <class Config>
class ImaginarySpinSpinCorrelation_Z : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;///< the vertex configuration type
typedef typename Configuration::FPType FPType;///< the floating point type of the simulation
typedef typename Config::Comm Net;///< the communication/network type
typedef typename Config::SignType GFRetVal;///< the type of the configuration's phase/sign
typedef std::valarray<GFRetVal> Function;///< one time-resolved function
typedef std::valarray<Function> ObservableType;///< Spin-Spin-Correlations are spatially resolved time-dependent observables
/**
The Constructor for the correlated part Spin-Spin-Correlation
@param n the communication object used by Network_Cache
@param params simulation parameters; N, functionpoints and delta_s are read. len and functionpoints are held by reference, so params must outlive this observable.
*/
inline ImaginarySpinSpinCorrelation_Z(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "ImaginarySpinSpinCorrelation_Z"), len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);///< registers the four density-density contractions used by evaluate()
/**
this determines the Spin-Spin-Correlation for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& len;///< bound to params.N; number of sites
const uint32_t& functionpoints;///< bound to params.functionpoints; number of time slices
const double delta_s;///< params.delta_s, the time-slice width
};
/**
Dry run: registers the four density-density contractions
<n_{k,s}^sigma n_{0,0}^sigma'> combined by evaluate(), for every time slice
and every site. (An unused local typedef of Vertex was removed.)
@param func the DryRun registration functor
*/
template <class Config>
void ImaginarySpinSpinCorrelation_Z<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    for (unsigned int j = 0; j < functionpoints; ++j)//for every time-slice
    {
        for (unsigned int k = 0; k < len; ++k)//for each site
        {
            const FPType s = j * delta_s;
            densitydensityCorrelation_dry<Configuration>(func, k, s, UP, 0, 0, UP);
            densitydensityCorrelation_dry<Configuration>(func, k, s, DOWN, 0, 0, DOWN);
            densitydensityCorrelation_dry<Configuration>(func, k, s, UP, 0, 0, DOWN);
            densitydensityCorrelation_dry<Configuration>(func, k, s, DOWN, 0, 0, UP);
        }
    }
    return;
}
/**
Measures <S^z(k, s) S^z(0, 0)> on the (site, time-slice) grid from the four
density-density Wick contractions: equal-spin terms add, opposite-spin terms
subtract, as follows from (n_up - n_down)(n_up - n_down).
(An unused local typedef of Vertex was removed.)
@param configuration the current vertex configuration (supplies the phase)
@param dowick the Wick-contraction evaluator
*/
template <class Config>
void ImaginarySpinSpinCorrelation_Z<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType func(len);
    for (unsigned int k = 0; k < len; ++k)
    {
        func[k].resize(functionpoints);
        for (unsigned int j = 0; j < functionpoints; ++j)
        {
            const FPType s = j * delta_s;
            GFRetVal retval = densitydensityCorrelation<Configuration>(dowick, k, s, UP, 0, 0, UP)
                + densitydensityCorrelation<Configuration>(dowick, k, s, DOWN, 0, 0, DOWN)
                - densitydensityCorrelation<Configuration>(dowick, k, s, UP, 0, 0, DOWN)
                - densitydensityCorrelation<Configuration>(dowick, k, s, DOWN, 0, 0, UP);
            //add to measurement; the factor 1/4 presumably stems from the two 1/2 prefactors of S^z - confirm
            func[k][j] = retval * configuration.phase/ static_cast<FPType>(4.0);
        }
    }
    this->add_bin(func);
    return;
}
/**
The Spin-Susceptiblity in X direction
*/
template <class Config, class Greensfunction>
class SpinSusceptibility_X : public Network_Cache<Config, typename Greensfunction::GOmegaRetType>
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::Comm Net;
typedef typename Config::SignType GFRetVal;
typedef typename Greensfunction::GOmegaRetType GOmegaRetType;
typedef GOmegaRetType ObservableType;
/**
*/
inline SpinSusceptibility_X(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SpinSusceptibility_X"), len(params.N), beta(params.beta)
{
myf = new GOmegaRetType*[8];
delta_tau = beta/points;
const uint tablesize = 2 * points;
for (uint k = 0; k < 8; ++k)
{
myf[k] = new GOmegaRetType[tablesize];
memset(myf[k], 0, tablesize * sizeof(GOmegaRetType));
}
for (uint n = 0; n < points; ++n)
{
FPType omegan = M_PI/beta*(2*n+1);
for (int s = 0; s < 8; ++s)
{
SPINS sigma = (s>>2&1? DOWN: UP);
SPINS sigma_r = (s>>1&1? DOWN: UP);
SPINS sigma_s = (s&1? DOWN: UP);
typename Greensfunction::Vertex v1;
typename Greensfunction::Vertex v2;
v1.spin = sigma_r;
v2.spin = !sigma;
GOmegaRetType goma = Greensfunction::gomega(omegan, v1, v2);
GOmegaRetType gomam = Greensfunction::gomega(-omegan, v1, v2);
v1.spin = sigma;
v2.spin = sigma_s;
GOmegaRetType gomb = Greensfunction::gomega(omegan, v1, v2);
GOmegaRetType gombm = Greensfunction::gomega(-omegan, v1, v2);
for (uint k = 0; k < points; ++k)
{
FPType tau = k*delta_tau;
std::complex<FPType> expt = exp(std::complex<FPType>(0.0, omegan * tau));
std::complex<FPType> expmt = std::conj(expt);
accessphi(sigma, sigma_r, sigma_s, k) += goma * gomb * expmt + gomam*gombm*expt;
accessphi(sigma, sigma_r, sigma_s, points + k) += goma * gomb * expt + gomam*gombm*expmt;
}
}
}
for (uint s = 0; s < 8; ++s)
for (uint k = 0; k < tablesize; ++k)
myf[s][k] /= beta;
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the Spin-Susceptiblity for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && (GCC_VERSION > GCC_VER(4,5,0))
static constexpr uint points = 1000;
#else
static const uint points = 1000;
#endif
const uint32_t& len;
FPType beta;
FPType delta_tau;
GOmegaRetType** myf;
GOmegaRetType& accessphi(SPINS sigma, SPINS sigma_r, SPINS sigma_s, uint tau_idx)
{
return myf[
(sigma == UP ? 0: 4) +
(sigma_r == UP ? 0: 2) +
(sigma_s == UP ? 0 : 1)
][tau_idx];
}
GOmegaRetType contrib(SPINS sigma, SPINS sigmaprime, const Configuration& config, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
typename Greensfunction::Vertex v1;
v1.spin = sigmaprime;
v1.tau = 0;
typename Greensfunction::Vertex v2;
v2.spin = !sigmaprime;
v2.tau = 0;
GOmegaRetType sum1 = dowick(v1, v2);
v1.spin = sigma;
v2.spin = !sigma;
GFRetVal gf1 = beta * Greensfunction::eval(v1, v2);
GOmegaRetType suma = 0.0;
for (uint r = 0; r < config.size(); ++r)
for (uint s = 0; s < config.size(); ++s)
{
FPType taurs = config[r].tau - config[s].tau;
uint tau_idx = 0;
if (taurs < 0)
{
tau_idx += points;
taurs = -taurs;
}
tau_idx += static_cast<uint>(trunc(taurs/delta_tau));
suma += config.matcont(r, s, UP, UP) * accessphi(sigma, UP, UP, tau_idx);
suma += config.matcont(r, s, DOWN, UP) * accessphi(sigma, DOWN, UP, tau_idx);
suma += config.matcont(r, s, UP, DOWN) * accessphi(sigma, UP, DOWN, tau_idx);
suma += config.matcont(r, s, DOWN, DOWN) * accessphi(sigma, DOWN, DOWN, tau_idx);
}
sum1 *= (GOmegaRetType(gf1) - suma);
GOmegaRetType sum2 = accessphi(sigma, sigmaprime, !sigmaprime, 0);
uint twosize = static_cast<unsigned int>(2 * config.size());
typename Configuration::MatConf::MatType psi(1, twosize);
for (uint k = 0; k < config.size(); ++k)
{
v1.tau = config[k].tau;
v2.tau = 0;
v1.spin = UP;
v2.spin = !sigmaprime;
psi(0,2*k) = Greensfunction::eval(v1, v2);
v1.spin = DOWN;
psi(0, 2*k + 1) = Greensfunction::eval(v1, v2);
}
config.matcont.multiplyVectorbyConfiguration_right(psi);
// psi = psi * config.matcont.mat;
GOmegaRetType sum3 = 0.0;
for (uint k = 0; k < config.size(); ++k)
{
FPType tau = config[k].tau;
uint tau_idx = points + static_cast<uint>(trunc(tau/delta_tau));/*see formula. that way we take the negative sign into consideration*/
sum3 += psi(0, 2*k) * accessphi(sigma, UP, sigmaprime, tau_idx);
sum3 += psi(0, 2*k + 1) * accessphi(sigma, DOWN, sigmaprime, tau_idx);
}
GOmegaRetType sum4 = 0.0;
typename Configuration::MatConf::MatType chi(twosize, 1);
for (uint k = 0; k < static_cast<uint>(config.size()); ++k)
{
v1.tau = 0;
v2.tau = config[k].tau;
v2.spin = UP;
v1.spin = sigmaprime;
chi(2*k, 0) = Greensfunction::eval(v1, v2);
v2.spin = DOWN;
chi(2*k + 1, 0) = Greensfunction::eval(v1, v2);
}
config.matcont.multiplyVectorbyConfiguration_left(chi);
// chi = config.matcont.mat * chi;
for (uint k = 0; k < static_cast<uint>(config.size()); ++k)
{
FPType tau = config[k].tau;
uint tau_idx = static_cast<uint>(trunc(tau/delta_tau));//tau should be positive being straight from a vertex
sum4 += chi(2*k, 0) * accessphi(sigma, !sigmaprime, UP, tau_idx);
sum4 += chi(2*k + 1, 0) * accessphi(sigma, !sigmaprime, DOWN, tau_idx);
}
GOmegaRetType sum5 = 0;//we can reuse psi and chi
for (uint r = 0; r < static_cast<uint>(config.size()); ++r)
for (uint s = 0; s < static_cast<uint>(config.size()); ++s)
{
FPType taurs = config[s].tau - config[r].tau;
uint tau_idx = 0;
if (taurs < 0)
{
tau_idx += points;
taurs = -taurs;
}
tau_idx += static_cast<uint>(trunc(taurs/delta_tau));
sum5 += psi(0, 2*r) * chi(2*s, 0)*accessphi(sigma, UP, UP, tau_idx);
sum5 += psi(0, 2*r+1) * chi(2*s, 0)*accessphi(sigma, DOWN, UP, tau_idx);
sum5 += psi(0, 2*r) * chi(2*s+1, 0)*accessphi(sigma, UP, DOWN, tau_idx);
sum5 += psi(0, 2*r+1) * chi(2*s+1, 0)*accessphi(sigma, DOWN, DOWN, tau_idx);
}
return sum1 - sum2 /*+*/- sum3 /*+*/- sum4 - sum5;
}
void contrib_dry(SPINS sigma, SPINS sigmaprime, DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Announce to the dry-run functor the single Green's-function access pattern
    // that contrib() will later request: an equal-time pair with spins
    // (sigmaprime, !sigmaprime). sigma is unused here by design.
    typename Greensfunction::Vertex creator;
    typename Greensfunction::Vertex annihilator;
    creator.tau = 0;
    creator.spin = sigmaprime;
    annihilator.tau = 0;
    annihilator.spin = !sigmaprime;
    func(creator, annihilator);
}
};
template <class Config, class Greensfunction>
void SpinSusceptibility_X<Config, Greensfunction>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register the Green's-function accesses for all four (sigma, sigma')
    // spin combinations, in the same order evaluate() uses them:
    // (UP,UP), (UP,DOWN), (DOWN,UP), (DOWN,DOWN).
    const SPINS spintable[2] = {UP, DOWN};
    for (unsigned int a = 0; a < 2; ++a)
        for (unsigned int b = 0; b < 2; ++b)
            contrib_dry(spintable[a], spintable[b], func);
}
template <class Config, class Greensfunction>
void SpinSusceptibility_X<Config, Greensfunction>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Sum the contributions of all four spin combinations and store the
    // spin-averaged result (hence the division by 4) in the measurement bin.
    GOmegaRetType acc = contrib(UP, UP, configuration, dowick);
    acc += contrib(UP, DOWN, configuration, dowick);
    acc += contrib(DOWN, UP, configuration, dowick);
    acc += contrib(DOWN, DOWN, configuration, dowick);
    this->add_bin(acc / 4.0);
}
/**
The Spin-Susceptibility in Z direction
*/
template <class Config, class Greensfunction>
class SpinSusceptibility_Z : public Network_Cache<Config, typename Greensfunction::GOmegaRetType>
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::Comm Net;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<GFRetVal> Function;
typedef typename Greensfunction::GOmegaRetType GOmegaRetType;
typedef GOmegaRetType ObservableType;
/**
Constructor. Tabulates the auxiliary function phi(sigma, sigma_r, sigma_s, tau)
on a regular tau grid: one table per spin combination (8 tables), each with
2*points entries, where the second half of a table holds the values for
negative tau. Each entry is built from a sum over fermionic Matsubara
frequencies of products of non-interacting Green's functions, and the whole
table is divided by beta at the end (the 1/beta prefactor of the sum).
@param n the network communicator handed to the Network_Cache base
@param params simulation parameters; beta and N are read here
*/
inline SpinSusceptibility_Z(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "SpinSusceptibility_Z"), len(params.N), beta(params.beta)
{
myf = new GOmegaRetType*[8];// one table per (sigma, sigma_r, sigma_s) combination
delta_tau = beta/points;
const uint tablesize = 2 * points;// positive-tau half plus negative-tau half
for (uint k = 0; k < 8; ++k)
{
myf[k] = new GOmegaRetType[tablesize];
memset(myf[k], 0, tablesize * sizeof(GOmegaRetType));// NOTE(review): assumes all-zero bytes represent GOmegaRetType(0) — true for float/double/std::complex, verify for other types
}
for (uint n = 0; n < points; ++n)// n is the Matsubara index here (shadows the ctor argument)
{
FPType omegan = M_PI/beta*(2*n+1);// fermionic Matsubara frequency omega_n
for (int s = 0; s < 8; ++s)
{
// decode the three spins from the bit pattern of s (bit2=sigma, bit1=sigma_r, bit0=sigma_s)
SPINS sigma = (s>>2&1? DOWN: UP);
SPINS sigma_r = (s>>1&1? DOWN: UP);
SPINS sigma_s = (s&1? DOWN: UP);
typename Greensfunction::Vertex v1;
typename Greensfunction::Vertex v2;
v1.spin = sigma_r;
v2.spin = sigma;
GOmegaRetType goma = Greensfunction::gomega(omegan, v1, v2);// G(+omega_n) for (sigma_r, sigma)
GOmegaRetType gomam = Greensfunction::gomega(-omegan, v1, v2);// G(-omega_n) for (sigma_r, sigma)
v1.spin = sigma;
v2.spin = sigma_s;
GOmegaRetType gomb = Greensfunction::gomega(omegan, v1, v2);// G(+omega_n) for (sigma, sigma_s)
GOmegaRetType gombm = Greensfunction::gomega(-omegan, v1, v2);// G(-omega_n) for (sigma, sigma_s)
for (uint k = 0; k < points; ++k)
{
FPType tau = k*delta_tau;
std::complex<FPType> expt = exp(std::complex<FPType>(0.0, omegan * tau));
std::complex<FPType> expmt = std::conj(expt);
// index k: tau >= 0 half of the table; index points + k: the negative-tau half
accessphi(sigma, sigma_r, sigma_s, k) += goma * gomb * expmt + gomam*gombm*expt;
accessphi(sigma, sigma_r, sigma_s, points + k) += goma * gomb * expt + gomam*gombm*expmt;
}
}
}
// apply the 1/beta prefactor of the Matsubara sum
for (uint s = 0; s < 8; ++s)
for (uint k = 0; k < tablesize; ++k)
myf[s][k] /= beta;
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
This determines the Spin-Susceptibility for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && (GCC_VERSION > GCC_VER(4,5,0))
static constexpr uint points = 1000;///< number of tau grid points per table half
#else
static const uint points = 1000;///< number of tau grid points per table half
#endif
const uint32_t& len;///< reference to params.N
FPType beta;///< the inverse temperature
FPType delta_tau;///< tau grid spacing, beta/points
GOmegaRetType** myf;///< the 8 tabulated phi functions, each of length 2*points. NOTE(review): never freed — leaks unless released elsewhere
/**
Table lookup for phi. The three spins select one of the 8 tables;
tau_idx indexes the tau grid (indices >= points address the negative-tau half).
Returns a mutable reference so the constructor can accumulate into the table.
*/
GOmegaRetType& accessphi(SPINS sigma, SPINS sigma_r, SPINS sigma_s, int tau_idx)
{
return myf[
(sigma == UP ? 0: 4) +
(sigma_r == UP ? 0: 2) +
(sigma_s == UP ? 0 : 1)
][tau_idx];
}
/**
The contribution of one (sigma, sigmaprime) spin combination for the given
configuration: the Wick-contracted density term (sum1) combined with the
tabulated phi corrections (sum2 .. sum5). See the sign discussion at the
return statement.
*/
GOmegaRetType contrib(const SPINS sigma, const SPINS sigmaprime, const Configuration& config, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
typename Greensfunction::Vertex v1;
v1.spin = sigmaprime;
v1.tau = 0;
typename Greensfunction::Vertex v2;
v2.spin = sigmaprime;
v2.tau = 0;
GOmegaRetType sum1 = dowick(v1, v2);// equal-time, equal-spin Wick contraction
v1.spin = sigma;
v2.spin = sigma;
GFRetVal gf1 = beta * Greensfunction::eval(v1, v2);
GOmegaRetType suma = 0.0;
// double sum over vertex pairs weighted by the configuration matrix
for (uint r = 0; r < config.size(); ++r)
for (uint s = 0; s < config.size(); ++s)
{
FPType taurs = config[r].tau - config[s].tau;
int tau_idx = 0;
if (taurs < 0)
{
// negative time differences live in the second half of the phi table
tau_idx += points;
taurs = -taurs;
}
tau_idx += static_cast<int>(trunc(taurs/delta_tau));
suma += config.matcont(r, s, UP, UP) * accessphi(sigma, UP, UP, tau_idx);
suma += config.matcont(r, s, DOWN, UP) * accessphi(sigma, DOWN, UP, tau_idx);
suma += config.matcont(r, s, UP, DOWN) * accessphi(sigma, UP, DOWN, tau_idx);
suma += config.matcont(r, s, DOWN, DOWN) * accessphi(sigma, DOWN, DOWN, tau_idx);
}
sum1 *= (gf1 - suma);
GOmegaRetType sum2 = accessphi(sigma, sigmaprime, sigmaprime, 0);
// psi: row vector of bare Green's functions from every vertex (both spins) to tau = 0
typename Configuration::MatConf::MatType psi(1, static_cast<unsigned int>(2 * config.size()));
for (uint k = 0; k < config.size(); ++k)
{
v1.tau = config[k].tau;
v2.tau = 0;
v1.spin = UP;
v2.spin = sigmaprime;
psi(0,2*k) = Greensfunction::eval(v1, v2);
v1.spin = DOWN;
psi(0, 2*k + 1) = Greensfunction::eval(v1, v2);
}
config.matcont.multiplyVectorbyConfiguration_right(psi);
// psi = psi * config.matcont.mat;
GOmegaRetType sum3 = 0.0;
for (uint k = 0; k < config.size(); ++k)
{
FPType tau = config[k].tau;
int tau_idx = points + static_cast<int>(trunc(tau/delta_tau));/*see formula. that way we take the negative sign into consideration*/
sum3 += psi(0, 2*k) * accessphi(sigma, UP, sigmaprime, tau_idx);
sum3 += psi(0, 2*k + 1) * accessphi(sigma, DOWN, sigmaprime, tau_idx);
}
GOmegaRetType sum4 = 0.0;
// chi: column vector of bare Green's functions from tau = 0 to every vertex (both spins)
typename Configuration::MatConf::MatType chi(static_cast<unsigned int>(2 * config.size()), 1);
for (uint k = 0; k < config.size(); ++k)
{
v1.tau = 0;
v2.tau = config[k].tau;
v2.spin = UP;
v1.spin = sigmaprime;
chi(2*k, 0) = Greensfunction::eval(v1, v2);
v2.spin = DOWN;
chi(2*k + 1, 0) = Greensfunction::eval(v1, v2);
}
config.matcont.multiplyVectorbyConfiguration_left(chi);
// chi = config.matcont.mat * chi;
for (uint k = 0; k < static_cast<uint>(config.size()); ++k)
{
FPType tau = config[k].tau;
int tau_idx = static_cast<int>(trunc(tau/delta_tau));//tau should be positive being straight from a vertex
sum4 += chi(2*k, 0) * accessphi(sigma, sigmaprime, UP, tau_idx);
sum4 += chi(2*k + 1, 0) * accessphi(sigma, sigmaprime, DOWN, tau_idx);
}
GOmegaRetType sum5 = 0;//we can reuse psi and chi
for (uint r = 0; r < static_cast<uint>(config.size()); ++r)
for (uint s = 0; s < static_cast<uint>(config.size()); ++s)
{
FPType taurs = config[s].tau - config[r].tau;
int tau_idx = 0;
if (taurs < 0)
{
tau_idx += points;
taurs = -taurs;
}
tau_idx += static_cast<int>(trunc(taurs/delta_tau));
sum5 += psi(0, 2*r) * chi(2*s, 0)*accessphi(sigma, UP, UP, tau_idx);
sum5 += psi(0, 2*r+1) * chi(2*s, 0)*accessphi(sigma, DOWN, UP, tau_idx);
sum5 += psi(0, 2*r) * chi(2*s+1, 0)*accessphi(sigma, UP, DOWN, tau_idx);
sum5 += psi(0, 2*r+1) * chi(2*s+1, 0)*accessphi(sigma, DOWN, DOWN, tau_idx);
}
return sum1 - sum2 - sum3 - sum4 - sum5;//although Davids thesis states this formula differently careful testing reveals this choice of signs(with sum 3 and sum4 negative) to be the right one. Note that the signs of David's thesis can be reestablished by giving psi and chi an additional sign, or by exchanging the order in which the vertices are evaluated. This shouldn't change the sign of sum5 since there's a product of psi and chi and hence any sign cancels.
}
/**
Announce to the dry-run functor the one Green's-function access that contrib()
requests from the Wick decomposition.
*/
void contrib_dry(SPINS sigma, SPINS sigmaprime, DryRun<typename Configuration::value_type, GFRetVal>& func)
{//yes, the only thing that we don't derive from tables only depends on sigmaprime
typename Greensfunction::Vertex v1;
v1.spin = sigmaprime;
v1.tau = 0;
func(v1, v1);
}
};
template <class Config, class Greensfunction>
void SpinSusceptibility_Z<Config, Greensfunction>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    // Register the Green's-function accesses for all four (sigma, sigma')
    // spin combinations, in the same order evaluate() uses them:
    // (UP,UP), (UP,DOWN), (DOWN,UP), (DOWN,DOWN).
    const SPINS spintable[2] = {UP, DOWN};
    for (unsigned int a = 0; a < 2; ++a)
        for (unsigned int b = 0; b < 2; ++b)
            contrib_dry(spintable[a], spintable[b], func);
}
template <class Config, class Greensfunction>
void SpinSusceptibility_Z<Config, Greensfunction>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Combine the four spin combinations with the Z-channel signs
    // (equal spins add, opposite spins subtract) and bin the result / 4.
    GOmegaRetType acc = contrib(UP, UP, configuration, dowick);
    acc -= contrib(UP, DOWN, configuration, dowick);
    acc -= contrib(DOWN, UP, configuration, dowick);
    acc += contrib(DOWN, DOWN, configuration, dowick);
    this->add_bin(acc / 4.0);
}
/**
Precomputed per-frequency data for Matsubara sums: the frequency itself plus
its square and reciprocal, filled once in the constructors that use it.
*/
template <typename FPType>
struct Omegadata
{
FPType omega;///< the fermionic Matsubara frequency omega_n = pi/beta*(2n+1)
FPType omegasq;///< omega_n squared (precomputed)
FPType invomega;///< 1/omega_n (precomputed)
};
/**
A class for measuring the local Green's functions on the bath sites surrounding the impurity
*/
template <class Config, class GreensFunction, SPINS Spin>
class LocalBathGreensfunctions : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef GreensFunction GF;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///<we pack all 80000 functions into a single valarray...
/**
The Constructor for the LocalBathGreensfunctions.
Notice that in unmixed[] we tabulate <d^\dagger c>
Precomputes two float tables on a tau grid of Nt points per (orbital n, site r) pair:
unmixeddata (bath-bath correlator) and mixeddata (impurity-bath correlator), each with
an up-spin half and a down-spin half offset by Nxyt. The tables are built from the
eigen-decomposition data of the bath Hamiltonian via a spatial FFT (fourier1) and a
Matsubara transform.
@param n the network communicator handed to the Network_Cache base
@param params simulation parameters (beta, Nx, Nb, functionpoints, delta_s)
*/
LocalBathGreensfunctions(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "LocalBathGreensfunctions"), functionpoints(params.functionpoints), delta_s(params.delta_s),
beta(params.beta),
Ny(params.Nb*2),
Nx(params.Nx),
Nt(300 + 1),
delta_beta(params.beta/static_cast<FPType>(Nt - 1)),
Nw(4*Nt),
Nyt(Nt * Ny),
Nxyt(Nyt*params.Nx),
functions(Ny*params.Nx)
{
#ifdef _OPENMP
double start2 = omp_get_wtime();
#endif
std::cout<<"Creating Local Bath Greensfunction"<<std::endl;
const GOmegaData<FPType> *const updata = GF::gomegaup->data;
const GOmegaData<FPType> *const downdata = GF::gomegadown->data;
//let's set up the Q(r,n,omega_n) data
Omegadata<FPType>* omega = new Omegadata<FPType>[Nw/2];
std::complex<FPType>* gup = new std::complex<FPType>[Nw];
std::complex<FPType>* gdown = new std::complex<FPType>[Nw];
// frequency layout throughout: gup[2t] is +omega_t, gup[2t+1] is -omega_t
for (int t = 0; t < Nw/2; ++t)
{
gup[2*t] = conj((*GF::gomegaup)(t));
gup[2*t + 1] = conj((*GF::gomegaup)( -t - 1));
gdown[2*t] = conj((*GF::gomegadown)(t));
gdown[2*t + 1] = conj((*GF::gomegadown)( -t - 1));
omega[t].omega = M_PI/params.beta*(2*t + 1);
omega[t].omegasq = omega[t].omega*omega[t].omega;
omega[t].invomega = 1.0/omega[t].omega;
}
FPType pref = GF::gethybridization() / std::sqrt(params.Nx);// V/sqrt(L)
unmixeddata = new std::complex<float>[2*Nxyt];// NOTE(review): never freed (no destructor visible) — leaks unless released elsewhere
//Since we have a simple, analytical expression for the tau dependence of the offset of the unmixed greensfunctions
//we write that one out first to the unmixeddata array
FPType* cosharrayup = new FPType[functions];
FPType* cosharraydown = new FPType[functions];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
cosharrayup[k*Ny + m] = 1.0/std::cosh(params.beta*updata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
cosharraydown[k*Ny + m] = 1.0/std::cosh(params.beta*downdata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
}
#pragma omp parallel for
for(uint n = 0; n < Ny; ++n)// n: orbital index; shadows the ctor argument
{
FPType* tempup = new FPType[functions];
FPType* tempdown = new FPType[functions];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
tempup[k*Ny + m] = cosharrayup[k*Ny + m] * norm(updata[k*Ny + /*n*/m].evec[/*m*/n]);
tempdown[k*Ny + m] = cosharraydown[k*Ny + m] * norm(downdata[k*Ny + /*n*/m].evec[/*m*/n]);
}
for(uint t = 0; t < Nt; ++t)
{
FPType ftup = 0.0;
FPType ftdown = 0.0;
FPType arg = t * delta_beta - params.beta/2.0;
FPType cup = 0.0;// Kahan compensation terms
FPType cdown = 0.0;
for(uint k = 0; k < params.Nx; ++k)//being careful we employ Kahan summation
for(uint m = 0; m < Ny; ++m)
{
FPType argup = tempup[k*Ny + m] * std::exp(updata[k*Ny + m].lambda*arg);
FPType argdown = tempdown[k*Ny + m] * std::exp(downdata[k*Ny + m].lambda*arg);
FPType y = argup - cup;
FPType t = ftup + y;// NOTE(review): this FPType t shadows the uint loop index t — harmless here, but confusing
cup = (t - ftup) - y;
ftup = t;
y = argdown - cdown;
t = ftdown + y;
cdown = (t - ftdown) - y;
ftdown = t;
}
// the analytic offset is r-independent, so replicate it over all sites
for(uint r = 0; r < params.Nx; ++r)
{
(unmixeddata + n*Nt*params.Nx + r*Nt)[t] = static_cast<float>(ftup);
(unmixeddata + Nxyt + n*Nt*params.Nx + r*Nt)[t] = static_cast<float>(ftdown);
}
}
delete [] tempup;
delete [] tempdown;
}
delete [] cosharrayup;
delete [] cosharraydown;
const uint Nxw = Nw*params.Nx;
const std::complex<FPType> expNt = std::exp(std::complex<FPType>(0.0, -M_PI/(Nt-1)));// per-step phase of the Matsubara transform
const FPType normierungunmixed = 1.0/params.beta / params.Nx;
const FPType normierungmixed = 1.0/params.beta / std::sqrt(params.Nx);
mixeddata = new std::complex<float>[2*Nxyt];// NOTE(review): never freed — see unmixeddata
std::ofstream gimag("gimag.txt");// NOTE(review): debug leftover — only referenced by the commented-out dump below, but still creates/truncates the file
//#pragma omp parallel for
for (uint n = 0; n < Ny; ++n)
{
std::complex<FPType>* Qup = new std::complex<FPType>[Nxw];
std::complex<FPType>* Qdown = new std::complex<FPType>[Nxw];
std::complex<FPType>* funup = new std::complex<FPType>[Nxw];
std::complex<FPType>* fundown = new std::complex<FPType>[Nxw];
memset(funup, 0, Nxw*sizeof(std::complex<FPType>));
memset(fundown, 0, Nxw*sizeof(std::complex<FPType>));
// double start = omp_get_wtime();
for (uint k = 0; k < params.Nx; ++k)
{
for (uint m = 0; m < Ny; ++m)
{
std::complex<FPType> facup = conj(updata [k*Ny + m].u) * updata[k*Ny + m].evec[n];
std::complex<FPType> facdown = conj(downdata[k*Ny + m].u) * downdata[k*Ny + m].evec[n];
for (int omega_idx = 0; omega_idx < Nw/2; ++omega_idx)
{//the layout of the frequencies is now (w_n, -w_n) , that is every negative frequency is stored next to its positive counterpart.
//Hopefully this gives a better data locality
funup[2*omega_idx*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, -omega[omega_idx].omega);
funup[(2*omega_idx + 1)*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, omega[omega_idx].omega);
fundown[2*omega_idx*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, -omega[omega_idx].omega);
fundown[(2*omega_idx + 1)*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, omega[omega_idx].omega);
}
}
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
// spatial Fourier transform k -> r for every frequency slice
for (uint w = 0; w < Nw; ++w)
{
fourier1(reinterpret_cast<FPType*>(funup + w*params.Nx), params.Nx, 1);
fourier1(reinterpret_cast<FPType*>(fundown + w*params.Nx), params.Nx, 1);
//funup as well as fundown now contain Q(r, i omega) for a particular value of the orbital n
for (uint r = 0; r < params.Nx; ++r)
{
funup[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
fundown[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
(Qup + r*Nw)[w] = funup[w*params.Nx + r];//norm(funup[w*params.Nx + r]);
(Qdown + r*Nw)[w] = fundown[w*params.Nx + r];//norm(fundown[w*params.Nx + r]);
// funup[w*params.Nx + r] = funup[w*params.Nx + r];
// fundown[w*params.Nx + r] = fundown[w*params.Nx + r];
}
}
/* for(uint r = 0; r < params.Nx; ++r)
{
for(uint w = 0; w < Nw/2; ++w)
{
gimag<<omega[w].omega<<" "<<real(funup[2*w*params.Nx + r]*gup[2*w])<<std::endl;
}
gimag<<"&"<<std::endl;
}*/
// pair each +omega entry of Q with the conjugate of its -omega partner (and vice versa)
for(uint r = 0; r < params.Nx; ++r)
for(uint w = 0; w < Nw/2; ++w)
{
std::complex<FPType> temp = conj((Qup + r*Nw)[2*w]);
(Qup + r*Nw)[2*w] *= conj((Qup + r*Nw)[2*w+1]);
(Qup + r*Nw)[2*w+1] *= temp;
temp = conj((Qdown + r*Nw)[2*w]);
(Qdown + r*Nw)[2*w] *= conj((Qdown + r*Nw)[2*w+1]);
(Qdown + r*Nw)[2*w+1] *= temp;
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
for (uint r = 0; r < params.Nx; ++r)
{
std::complex<FPType> expt = 1;// running phase, advanced by expNt per tau step
for (uint t = 0; t < Nt; ++t)//here is the final Matsubara transform
{
std::complex<FPType> tempup = 0;
std::complex<FPType> tempdown = 0;
std::complex<FPType> tempupmixed = 0;
std::complex<FPType> tempdownmixed = 0;
FPType tau = t*delta_beta;
std::complex<FPType> expiom = expt;//std::exp(tau * std::complex<FPType>(0.0, omega[0].omega));
std::complex<FPType> expfac = expiom * expiom;// advances expiom from omega_n to omega_{n+1}
for (int omega_idx = 0; omega_idx < Nw/2; ++omega_idx)
{
std::complex<FPType> gupp = gup[2*omega_idx];
std::complex<FPType> gupm = gup[2*omega_idx + 1];
std::complex<FPType> gdownp = gdown[2*omega_idx];
std::complex<FPType> gdownm = gdown[2*omega_idx + 1];
std::complex<FPType> cexpiom = conj(expiom);
tempup += cexpiom * (Qup + r * Nw)[2*omega_idx] * gupp
+ /*c*/expiom * (Qup + r * Nw)[2*omega_idx + 1] * gupm;
tempdown += cexpiom * (Qdown + r * Nw)[2*omega_idx] * gdownp
+/*c*/expiom*(Qdown + r * Nw)[2*omega_idx + 1] * gdownm;
tempupmixed += cexpiom*(funup[(2*omega_idx)*params.Nx + r] * gupp)
+ /*c*/expiom * (funup[(2*omega_idx + 1)*params.Nx + r] * gupm);
tempdownmixed += cexpiom*(fundown[(2*omega_idx)*params.Nx + r] * gdownp)
+ /*c*/expiom * (fundown[(2*omega_idx + 1)*params.Nx + r] * gdownm);
expiom *= expfac;
}
// test<<tempupmixed*normierungmixed<<std::endl;
//The unmixeddata has been debugged in comparison to a straightforward calculation from the real-space Hamiltonian
(unmixeddata + n*Nt*params.Nx + r*Nt)[t] += tempup*normierungunmixed;
(unmixeddata + Nxyt + n*Nt*params.Nx + r*Nt)[t] += tempdown*normierungunmixed;
//the sign of the mixeddata is wrong. But since for now mixeddata is only accessed as some squared quantity it doesn't hurt.
(mixeddata + n*Nt*params.Nx + r*Nt)[t] = tempupmixed*normierungmixed;
(mixeddata + Nxyt + n*Nt*params.Nx + r*Nt)[t] = tempdownmixed*normierungmixed;
expt *= expNt;
}
}
delete [] funup;
delete [] fundown;
delete [] Qup;
delete [] Qdown;
}
// exit(-1);
#ifdef _OPENMP
std::cout<<"Initialization took "<<omp_get_wtime() - start2<<" seconds"<<std::endl;
#endif
delete [] gup;
delete [] gdown;
delete [] omega;
std::cout<<"Local Bath GreensFunction done"<<std::endl;
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&) {}
/**
This determines the LocalBathGreensfunctions for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& functionpoints;///< number of output tau points per function
const FPType delta_s;///< tau spacing of the output grid
const FPType beta;///< the inverse temperature
std::complex<float>* mixeddata;///< here we store <d^+ gamma>. We use floats to keep the memory footprint small
std::complex<float>* unmixeddata;///< here we store <gamma^+ gamma>
const uint Ny;///< number of bath orbitals, 2*Nb
const uint Nx;///< number of sites
const uint Nt;///< number of tabulated tau points
const FPType delta_beta;///< tau spacing of the tabulated grid, beta/(Nt-1)
const uint Nw;///< number of stored Matsubara frequencies, 4*Nt
const uint Nyt;///< Nt * Ny
const uint Nxyt;///< Nyt * Nx; offset between the up and down halves of the tables
const uint32_t functions;///< number of measured functions, Ny * Nx
/**
Linear interpolation into the mixeddata table at time difference tau1 - tau2.
A negative difference is shifted by beta and the result negated (the sign flip
implemented below). For |tau1 - tau2| below machine epsilon the t=0 entry is
returned directly.
*/
std::complex<FPType> accessmixed(uint r, uint n, SPINS spin, FPType tau1, FPType tau2 ) const
{
const FPType tiny = std::numeric_limits<FPType>::epsilon();
FPType delta_tau = tau1 - tau2;
FPType sign = 1.0;
std::complex<float>* dataptr = mixeddata;
if (spin == DOWN) dataptr += Nxyt;// down-spin half of the table
dataptr = dataptr + n*Nt*Nx + r*Nt;
if (std::abs(delta_tau) < tiny)
{
//return only the particle number
return std::complex<FPType>(dataptr[0]);
}
if(delta_tau < 0)
{
sign = -1.0;
delta_tau += beta;
}
FPType fptau_idx0;
FPType rem = std::modf(delta_tau/delta_beta, &fptau_idx0);//round to the smaller index and determine how far we're of
std::size_t tau_idx0 = lround(fptau_idx0);
// std::cout<<"tau_0: "<<tau_idx0<<" "<<g[tau_idx0]<<std::endl;
return std::complex<FPType>(lerp(float(rem), dataptr[tau_idx0], dataptr[tau_idx0 + 1]))*sign;
}
/**
Linear interpolation into the unmixeddata table at time tau (assumed to lie in
[0, beta); no wrap-around or sign handling is done here).
*/
std::complex<FPType> accessunmixed(uint r, uint n, SPINS spin, FPType tau) const
{
std::complex<float>* dataptr = unmixeddata;
if (spin == DOWN) dataptr += Nxyt;// down-spin half of the table
dataptr = dataptr + n*Nt*Nx + r*Nt;
FPType fptau_idx0;
FPType rem = std::modf(tau/delta_beta, &fptau_idx0);//round to the smaller index and determine how far we're of
std::size_t tau_idx0 = static_cast<std::size_t>(lround(fptau_idx0));
// std::cout<<"tau_0: "<<tau_idx0<<" "<<g[tau_idx0]<<std::endl;
//if(tau_idx0 == Nt) return dataptr[tau_idx0];
return std::complex<FPType>(lerp(float(rem), dataptr[tau_idx0], dataptr[tau_idx0 + 1]));
}
};
/**
Measures the bath Green's function of spin sector Spin for the current
configuration: for every (orbital n, site r) pair and every output tau point,
the tabulated bath-bath correlator (t1) is corrected by the vertex
contribution (t2) built from the configuration matrix and the mixed
impurity-bath correlators, then multiplied by the configuration's phase.
The dowick argument is unused here — everything comes from the tables.
*/
template <class Config, class GreensFunction, SPINS Spin>
void LocalBathGreensfunctions<Config, GreensFunction, Spin>::evaluate(const Configuration& config, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
ObservableType func(functions);
#pragma omp parallel for
for (unsigned int k = 0; k < func.size(); ++k)
{
func[k].resize(functionpoints);
uint n = k / Nx;// orbital index
uint r = k % Nx;// site index
for (unsigned int j = 0; j < functionpoints/*-1*/; ++j)
{
const FPType tau = j * delta_s;
// uint unmixedidx = trunc(tau/delta_beta);
GFRetVal t1 = accessunmixed(r, n, Spin, tau);
// (unmixeddata + k*Nt + r*Nt)[unmixedidx];//access the up-sector
GFRetVal t2 = 0;
// NOTE(review): the second accessmixed here is conj(accessmixed(..., tau, config[s].tau)),
// whereas the _averaged variant uses accessmixed(..., config[s].tau, tau) without swapping
// the arguments — confirm the two conventions are intentionally different
for (uint q = 0; q < config.size(); ++q)
for (uint s = 0; s < config.size(); ++s)
{
if(Spin == UP)
t2 += config.matcont.mat(2*q, 2*s) * accessmixed(r, n, UP, config[q].tau, 0) * conj(accessmixed(r, n, UP, tau, config[s].tau));
t2 += config.matcont.mat(2*q, 2*s + 1) * static_cast<FPType>(0.0);//For now disabled
t2 += config.matcont.mat(2*q + 1, 2*s) * static_cast<FPType>(0.0);//For now disabled
if(Spin == DOWN)
t2 += config.matcont.mat(2*q + 1, 2*s + 1) * accessmixed(r, n, DOWN, config[q].tau, 0) * conj(accessmixed(r, n, DOWN, tau, config[s].tau));
}
//add to measurement
func[k][j] = (t1 - t2)* config.phase;
}
/* const FPType tau = beta-0.001;
// uint unmixedidx = trunc(tau/delta_beta);
GFRetVal t1 = accessunmixed(r, n, UP, tau);
// (unmixeddata + k*Nt + r*Nt)[unmixedidx];//access the up-sector
GFRetVal t2 = 0;
for (uint r = 0; r < config.size(); ++r)
for (uint s = 0; s < config.size(); ++s)
{
t2 += config.matcont.mat(2*r, 2*s) * accessmixed(r, n, UP, config[r].tau, 0) * conj(accessmixed(r, n, UP, tau, config[s].tau));
t2 += config.matcont.mat(2*r, 2*s + 1) * static_cast<FPType>(0.0);//For now disabled
t2 += config.matcont.mat(2*r + 1, 2*s) * static_cast<FPType>(0.0);//For now disabled
t2 += config.matcont.mat(2*r + 1, 2*s + 1) * accessmixed(r, n, DOWN, config[r].tau, 0) * conj(accessmixed(r, n, DOWN, tau, config[s].tau));
}
//add to measurement
func[k][functionpoints - 1] = (t1 - t2)* config.phase;*/
}
this->add_bin(func);
return;
}
/**
A class for measuring the local Green's functions on the bath sites surrounding the impurity, averaged over both spin sectors
*/
template <class Config, class GreensFunction>
class LocalBathGreensfunctions_averaged : public Network_Cache<Config, std::valarray<std::valarray<typename Config::SignType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef GreensFunction GF;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<Function> ObservableType;///<we pack all 80000 functions into a single valarray...
/**
The Constructor for the LocalBathGreensfunctions.
Notice that in unmixed[] we tabulate <d^\dagger c>
*/
LocalBathGreensfunctions_averaged(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config, ObservableType>(n, "LocalBathGreensfunctions_averaged"), functionpoints(params.functionpoints), delta_s(params.delta_s),
beta(params.beta),
Ny(params.Nb*2),
Nx(params.Nx),
Nt(300 + 1),
delta_beta(params.beta/static_cast<FPType>(Nt - 1)),
Nw(4*Nt),
Nyt(Nt * Ny),
Nxyt(Nyt*params.Nx),
functions(Ny*params.Nx)
{
#ifdef _OPENMP
double start2 = omp_get_wtime();
#endif
std::cout<<"Creating Local Bath Greensfunction(the averaged one...)"<<std::endl;
const GOmegaData<FPType> *const updata = GF::gomegaup->data;
const GOmegaData<FPType> *const downdata = GF::gomegadown->data;
//let's set up the Q(r,n,omega_n) data
Omegadata<FPType>* omega = new Omegadata<FPType>[Nw/2];
std::complex<FPType>* gup = new std::complex<FPType>[Nw];
std::complex<FPType>* gdown = new std::complex<FPType>[Nw];
for (int t = 0; t < Nw/2; ++t)
{
gup[2*t] = conj((*GF::gomegaup)(t));
gup[2*t + 1] = conj((*GF::gomegaup)( -t - 1));
gdown[2*t] = conj((*GF::gomegadown)(t));
gdown[2*t + 1] = conj((*GF::gomegadown)( -t - 1));
omega[t].omega = M_PI/params.beta*(2*t + 1);
omega[t].omegasq = omega[t].omega*omega[t].omega;
omega[t].invomega = 1.0/omega[t].omega;
}
FPType pref = GF::gethybridization() / std::sqrt(params.Nx);
unmixeddata = new std::complex<float>[2*Nxyt];
//Since we have a simple, analytical expression for the tau dependence of the offset of the unmixed greensfunctions
//we write that one out first to the unmixeddata array
FPType* cosharrayup = new FPType[functions];
FPType* cosharraydown = new FPType[functions];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
cosharrayup[k*Ny + m] = 1.0/std::cosh(params.beta*updata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
cosharraydown[k*Ny + m] = 1.0/std::cosh(params.beta*downdata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
}
#pragma omp parallel for
for(uint n = 0; n < Ny; ++n)
{
FPType* tempup = new FPType[functions];
FPType* tempdown = new FPType[functions];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
tempup[k*Ny + m] = cosharrayup[k*Ny + m] * norm(updata[k*Ny + /*n*/m].evec[/*m*/n]);
tempdown[k*Ny + m] = cosharraydown[k*Ny + m] * norm(downdata[k*Ny + /*n*/m].evec[/*m*/n]);
}
for(uint t = 0; t < Nt; ++t)
{
FPType ftup = 0.0;
FPType ftdown = 0.0;
FPType arg = t * delta_beta - params.beta/2.0;
FPType cup = 0.0;
FPType cdown = 0.0;
for(uint k = 0; k < params.Nx; ++k)//being careful we employ Kahan summation
for(uint m = 0; m < Ny; ++m)
{
FPType argup = tempup[k*Ny + m] * std::exp(updata[k*Ny + m].lambda*arg);
FPType argdown = tempdown[k*Ny + m] * std::exp(downdata[k*Ny + m].lambda*arg);
FPType y = argup - cup;
FPType t = ftup + y;
cup = (t - ftup) - y;
ftup = t;
y = argdown - cdown;
t = ftdown + y;
cdown = (t - ftdown) - y;
ftdown = t;
}
for(uint r = 0; r < params.Nx; ++r)
{
(unmixeddata + n*Nt*params.Nx + r*Nt)[t] = static_cast<float>(ftup);
(unmixeddata + Nxyt + n*Nt*params.Nx + r*Nt)[t] = static_cast<float>(ftdown);
}
}
delete [] tempup;
delete [] tempdown;
}
delete [] cosharrayup;
delete [] cosharraydown;
const uint Nxw = Nw*params.Nx;
const std::complex<FPType> expNt = std::exp(std::complex<FPType>(0.0, -M_PI/(Nt-1)));
const FPType normierungunmixed = 1.0/params.beta / params.Nx;
const FPType normierungmixed = 1.0/params.beta / std::sqrt(params.Nx);
mixeddata = new std::complex<float>[2*Nxyt];
#pragma omp parallel for
for (uint n = 0; n < Ny; ++n)
{
std::complex<FPType>* Qup = new std::complex<FPType>[Nxw];
std::complex<FPType>* Qdown = new std::complex<FPType>[Nxw];
std::complex<FPType>* funup = new std::complex<FPType>[Nxw];
std::complex<FPType>* fundown = new std::complex<FPType>[Nxw];
memset(funup, 0, Nxw*sizeof(std::complex<FPType>));
memset(fundown, 0, Nxw*sizeof(std::complex<FPType>));
// double start = omp_get_wtime();
for (uint k = 0; k < params.Nx; ++k)
{
for (uint m = 0; m < Ny; ++m)
{
std::complex<FPType> facup = conj(updata [k*Ny + m].u) * updata[k*Ny + m].evec[n];
std::complex<FPType> facdown = conj(downdata[k*Ny + m].u) * downdata[k*Ny + m].evec[n];
for (int omega_idx = 0; omega_idx < Nw/2; ++omega_idx)
{//the layout of the frequencies is now (w_n, -w_n) , that is every negative frequency is stored next to its positive counterpart.
//Hopefully this gives a better data locality
funup[2*omega_idx*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, -omega[omega_idx].omega);
funup[(2*omega_idx + 1)*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, omega[omega_idx].omega);
fundown[2*omega_idx*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, -omega[omega_idx].omega);
fundown[(2*omega_idx + 1)*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, omega[omega_idx].omega);
}
}
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
for (uint w = 0; w < Nw; ++w)
{
fourier1(reinterpret_cast<FPType*>(funup + w*params.Nx), params.Nx, 1);
fourier1(reinterpret_cast<FPType*>(fundown + w*params.Nx), params.Nx, 1);
//funup as well as fundown now contain Q(r, i omega) for a particular value of the orbital n
for (uint r = 0; r < params.Nx; ++r)
{
funup[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
fundown[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
(Qup + r*Nw)[w] = funup[w*params.Nx + r];//norm(funup[w*params.Nx + r]);
(Qdown + r*Nw)[w] = fundown[w*params.Nx + r];//norm(fundown[w*params.Nx + r]);
// funup[w*params.Nx + r] = funup[w*params.Nx + r];
// fundown[w*params.Nx + r] = fundown[w*params.Nx + r];
}
}
for(uint r = 0; r < params.Nx; ++r)
for(uint w = 0; w < Nw/2; ++w)
{
std::complex<FPType> temp = conj((Qup + r*Nw)[2*w]);
(Qup + r*Nw)[2*w] *= conj((Qup + r*Nw)[2*w+1]);
(Qup + r*Nw)[2*w+1] *= temp;
temp = conj((Qdown + r*Nw)[2*w]);
(Qdown + r*Nw)[2*w] *= conj((Qdown + r*Nw)[2*w+1]);
(Qdown + r*Nw)[2*w+1] *= temp;
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
for (uint r = 0; r < params.Nx; ++r)
{
std::complex<FPType> expt = 1;
for (uint t = 0; t < Nt; ++t)//here is the final Matsubara transform
{
std::complex<FPType> tempup = 0;
std::complex<FPType> tempdown = 0;
std::complex<FPType> tempupmixed = 0;
std::complex<FPType> tempdownmixed = 0;
FPType tau = t*delta_beta;
std::complex<FPType> expiom = expt;//std::exp(tau * std::complex<FPType>(0.0, omega[0].omega));
std::complex<FPType> expfac = expiom * expiom;
for (int omega_idx = 0; omega_idx < Nw/2; ++omega_idx)
{
std::complex<FPType> gupp = gup[2*omega_idx];
std::complex<FPType> gupm = gup[2*omega_idx + 1];
std::complex<FPType> gdownp = gdown[2*omega_idx];
std::complex<FPType> gdownm = gdown[2*omega_idx + 1];
std::complex<FPType> cexpiom = conj(expiom);
tempup += cexpiom * (Qup + r * Nw)[2*omega_idx] * gupp
+ /*c*/expiom * (Qup + r * Nw)[2*omega_idx + 1] * gupm;
tempdown += cexpiom * (Qdown + r * Nw)[2*omega_idx] * gdownp
+/*c*/expiom*(Qdown + r * Nw)[2*omega_idx + 1] * gdownm;
tempupmixed += cexpiom*(funup[(2*omega_idx)*params.Nx + r] * gupp)
+ /*c*/expiom * (funup[(2*omega_idx + 1)*params.Nx + r] * gupm);
tempdownmixed += cexpiom*(fundown[(2*omega_idx)*params.Nx + r] * gdownp)
+ /*c*/expiom * (fundown[(2*omega_idx + 1)*params.Nx + r] * gdownm);
expiom *= expfac;
}
// test<<tempupmixed*normierungmixed<<std::endl;
//The unmixeddata has been debugged in comparison to a straightforward calculation from the real-space Hamiltonian
(unmixeddata + n*Nt*params.Nx + r*Nt)[t] += tempup*normierungunmixed;
(unmixeddata + Nxyt + n*Nt*params.Nx + r*Nt)[t] += tempdown*normierungunmixed;
//the sign of the mixeddata is wrong. But since for now mixeddata is only accessed as some squared quantity it doesn't hurt.
(mixeddata + n*Nt*params.Nx + r*Nt)[t] = tempupmixed*normierungmixed;
(mixeddata + Nxyt + n*Nt*params.Nx + r*Nt)[t] = tempdownmixed*normierungmixed;
expt *= expNt;
}
}
delete [] funup;
delete [] fundown;
delete [] Qup;
delete [] Qdown;
}
#ifdef _OPENMP
std::cout<<"Initialization took "<<omp_get_wtime() - start2<<" seconds"<<std::endl;
#endif
delete [] gup;
delete [] gdown;
delete [] omega;
std::cout<<"Local Bath GreensFunction done"<<std::endl;
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&) {}
/**
This determines the LocalBathGreensfunctions for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& functionpoints;
const FPType delta_s;
const FPType beta;
std::complex<float>* mixeddata;///< here we store <d^+ gamma>. We use floats to keep the memory footprint small
std::complex<float>* unmixeddata;///< here we store <gamma^+ gamma>
const uint Ny;
const uint Nx;
const uint Nt;
const FPType delta_beta;
const uint Nw;
const uint Nyt;
const uint Nxyt;
const uint32_t functions;
/**
Interpolated look-up of the tabulated mixed correlator <d^+ gamma> at time difference tau1 - tau2.
@param r the spatial index
@param n the orbital index
@param spin which spin sector of the table to read (the DOWN sector is stored Nxyt entries behind UP)
@param tau1 first imaginary time
@param tau2 second imaginary time
@return the table entry, linearly interpolated between the two bracketing tau slices,
        antiperiodically continued to negative time differences
*/
std::complex<FPType> accessmixed(uint r, uint n, SPINS spin, FPType tau1, FPType tau2 ) const
{
const FPType tiny = std::numeric_limits<FPType>::epsilon();
FPType delta_tau = tau1 - tau2;
FPType sign = 1.0;
std::complex<float>* dataptr = mixeddata;
if (spin == DOWN) dataptr += Nxyt;
dataptr = dataptr + n*Nt*Nx + r*Nt;//each (n, r) pair owns a contiguous run of Nt tau slices
if (std::abs(delta_tau) < tiny)
{
//return only the particle number
return std::complex<FPType>(dataptr[0]);
}
if(delta_tau < 0)
{
//fermionic antiperiodicity: G(tau) = -G(tau + beta) for tau < 0
sign = -1.0;
delta_tau += beta;
}
FPType fptau_idx0;
FPType rem = std::modf(delta_tau/delta_beta, &fptau_idx0);//round to the smaller index and determine how far we're of
std::size_t tau_idx0 = lround(fptau_idx0);
// std::cout<<"tau_0: "<<tau_idx0<<" "<<g[tau_idx0]<<std::endl;
//NOTE(review): for delta_tau == beta this reads dataptr[Nt], one past the last slice — confirm callers keep |tau1 - tau2| < beta
return std::complex<FPType>(lerp(float(rem), dataptr[tau_idx0], dataptr[tau_idx0 + 1]))*sign;
}
/**
Look-up of the tabulated unmixed bath correlator <gamma^+ gamma> at imaginary time tau.
@param r the spatial index
@param n the orbital index
@param spin which spin sector of the table to read (the DOWN sector is stored Nxyt entries behind UP)
@param tau the imaginary time; assumed to lie in [0, beta) so that tau_idx0 + 1 stays inside the table — TODO confirm
@return the table entry, linearly interpolated between the two bracketing tau slices
*/
std::complex<FPType> accessunmixed(uint r, uint n, SPINS spin, FPType tau) const
{
std::complex<float>* dataptr = unmixeddata;
if (spin == DOWN) dataptr += Nxyt;
dataptr = dataptr + n*Nt*Nx + r*Nt;//each (n, r) pair owns a contiguous run of Nt tau slices
FPType fptau_idx0;
FPType rem = std::modf(tau/delta_beta, &fptau_idx0);//round to the smaller index and determine how far we're of
std::size_t tau_idx0 = static_cast<std::size_t>(lround(fptau_idx0));
// std::cout<<"tau_0: "<<tau_idx0<<" "<<g[tau_idx0]<<std::endl;
//if(tau_idx0 == Nt) return dataptr[tau_idx0];
return std::complex<FPType>(lerp(float(rem), dataptr[tau_idx0], dataptr[tau_idx0 + 1]));
}
};
/**
Measures the local bath Green's function for the current Monte Carlo configuration.
For every (orbital n, site r) pair and every measurement time tau = j*delta_s it
combines the pre-tabulated non-interacting part (t1) with the vertex corrections (t2)
obtained from the configuration's inverse matrix, and adds the result to the bin.
Note: the dowick argument is part of the common observable interface but is unused here,
since everything needed was tabulated in the constructor.
@param config the current configuration of vertices
@param dowick the Wick-theorem evaluator (unused)
*/
template <class Config, class GreensFunction>
void LocalBathGreensfunctions_averaged<Config, GreensFunction>::evaluate(const Configuration& config, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
ObservableType func(functions);
// ofstream file("t2.txt");
//#pragma omp parallel for
for (unsigned int k = 0; k < func.size(); ++k)
{
func[k].resize(functionpoints);
uint n = k / Nx;//decode the flat index k into orbital n and site r
uint r = k % Nx;
for (unsigned int j = 0; j < functionpoints/*-1*/; ++j)
{
const FPType tau = j * delta_s;
// uint unmixedidx = trunc(tau/delta_beta);
//t1: the non-interacting (tabulated) part, summed over both spin sectors
GFRetVal t1 = accessunmixed(r, n, UP, tau) + accessunmixed(r, n, DOWN, tau);
// (unmixeddata + k*Nt + r*Nt)[unmixedidx];//access the up-sector
//t2: the vertex correction, a double sum over configuration vertices weighted by the inverse matrix
GFRetVal t2 = 0;
for (uint q = 0; q < config.size(); ++q)
for (uint s = 0; s < config.size(); ++s)
{
t2 += config.matcont.mat(2*q, 2*s) * accessmixed(r, n, UP, config[q].tau, 0) * conj(accessmixed(r, n, UP, config[s].tau, tau));
// t2 += config.matcont.mat(2*q, 2*s + 1) * static_cast<FPType>(0.0);//For now disabled
// t2 += config.matcont.mat(2*q + 1, 2*s) * static_cast<FPType>(0.0);//For now disabled
t2 += config.matcont.mat(2*q + 1, 2*s + 1) * accessmixed(r, n, DOWN, config[q].tau, 0) * conj(accessmixed(r, n, DOWN, config[s].tau, tau));
}
// file<<j<<" "<<real(t2)<<std::endl;
//add to measurement
func[k][j] = (t1 + t2)* config.phase/2.0;
}
// file<<"&"<<std::endl;
}
// exit(-1);
this->add_bin(func);
return;
}
/**
A class for measuring the Kondo-cloud as evidenced by the correlation function
<S^z_d S^z_c (x)>
where d denotes the dot electron and c the bath electron. All this as a function of distance from the dot.
*/
template <class Config, class GreensFunction, SPINS Spin>
class KondoCloud_Z : public Network_Cache<Config, std::valarray<typename Config::SignType> >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef GreensFunction GF;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<GFRetVal> ObservableType;///<the Kondo Cloud has no time-dependence. it depends only on the position
/**
The Constructor for the KondoCloud. It pre-tabulates the bath correlators needed by evaluate():
the equal-time unmixed correlator <gamma^+ gamma> for every (orbital n, site r) pair and the
mixed correlator <d^+ gamma> on an Nt-point imaginary-time grid. The analytically known part of
the unmixed correlator is written out first (via the cosh/exp expressions), then the remaining
Matsubara sums are accumulated on top.
@param n the communicator object handed to the Network_Cache base
@param params the simulation parameters (beta, Nx, Nb, functionpoints, ...)
*/
KondoCloud_Z(typename Config::Comm& n, const Parameters& params)/* throw()*/ : Network_Cache<Config, ObservableType>(n, "KondoCloud_Z"), functionpoints(params.functionpoints), beta(params.beta), Ny(params.Nb*2),
Nx(params.Nx),
Nt(300 + 1),
Nw(4*Nt),
delta_beta(params.beta/static_cast<FPType>(Nt-1)),
sites(Ny*params.Nx),
Nxyt(sites*Nt)
{
#ifdef _OPENMP
double start2 = omp_get_wtime();
#endif
std::cout<<"Creating KondoCloud_Z"<<std::endl;
const GOmegaData<FPType> *const updata = GF::gomegaup->data;
const GOmegaData<FPType> *const downdata = GF::gomegadown->data;
//let's set up the Q(r, n, omega_n) data
Omegadata<FPType>* omega = new Omegadata<FPType>[Nw/2];
std::complex<FPType>* gup = new std::complex<FPType>[Nw];
std::complex<FPType>* gdown = new std::complex<FPType>[Nw];
for (int t = 0; t < static_cast<int>(Nw/2); ++t)
{//frequency layout: every negative frequency is stored next to its positive counterpart
gup[2*t] = conj((*GF::gomegaup)(t));
gup[2*t + 1] = conj((*GF::gomegaup)( -t - 1));
gdown[2*t] = conj((*GF::gomegadown)(t));
gdown[2*t + 1] = conj((*GF::gomegadown)( -t - 1));
omega[t].omega = M_PI/params.beta*(2*t + 1);//fermionic Matsubara frequency
omega[t].omegasq = omega[t].omega*omega[t].omega;
omega[t].invomega = 1.0/omega[t].omega;
}
FPType pref = GF::gethybridization() / std::sqrt(params.Nx);
unmixeddata = new std::complex<FPType>[2*sites];
//Since we have a simple, analytical expression for the tau dependence of the offset of the unmixed greensfunctions
//we write that one out first to the unmixeddata array
FPType* cosharrayup = new FPType[sites];
FPType* cosharraydown = new FPType[sites];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
cosharrayup[k*Ny + m] = 1.0/std::cosh(params.beta*updata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
cosharraydown[k*Ny + m] = 1.0/std::cosh(params.beta*downdata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
}
#pragma omp parallel for
for(uint n = 0; n < Ny; ++n)
{
FPType* tempup = new FPType[sites];
FPType* tempdown = new FPType[sites];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
tempup[k*Ny + m] = cosharrayup[k*Ny + m] * norm(updata[k*Ny + /*n*/m].evec[/*m*/n]);
tempdown[k*Ny + m] = cosharraydown[k*Ny + m] * norm(downdata[k*Ny + /*n*/m].evec[/*m*/n]);
}
FPType ftup = 0.0;
FPType ftdown = 0.0;
FPType arg = - params.beta/2.0;//from that we only need a tau == 0 quantity
FPType cup = 0.0;
FPType cdown = 0.0;
for(uint k = 0; k < params.Nx; ++k)//being careful we employ Kahan summation
for(uint m = 0; m < Ny; ++m)
{
FPType argup = tempup[k*Ny + m] * std::exp(updata[k*Ny + m].lambda*arg);
FPType argdown = tempdown[k*Ny + m] * std::exp(downdata[k*Ny + m].lambda*arg);
FPType y = argup - cup;
FPType t = ftup + y;
cup = (t - ftup) - y;
ftup = t;
y = argdown - cdown;
t = ftdown + y;
cdown = (t - ftdown) - y;
ftdown = t;
}
for(uint r = 0; r < params.Nx; ++r)
{
//store at full FPType precision: the former static_cast<float> here was a leftover
//from a float-storage variant and needlessly truncated double-precision data
(unmixeddata + n*params.Nx)[r] = ftup;
(unmixeddata + sites + n*params.Nx)[r] = ftdown;
}
delete [] tempup;
delete [] tempdown;
}
delete [] cosharrayup;
delete [] cosharraydown;
const unsigned int Nxw = Nw*params.Nx;
FPType normierungunmixed = 1.0/params.beta / params.Nx;
FPType normierungmixed = 1.0/params.beta / std::sqrt(params.Nx);
const std::complex<FPType> expNt = std::exp(std::complex<FPType>(0.0, -M_PI/(Nt-1)));
mixeddata = new std::complex<FPType>[2*Nxyt];
#pragma omp parallel for
for (uint n = 0; n < Ny; ++n)
{
std::complex<FPType>* Qup = new std::complex<FPType>[Nxw];
std::complex<FPType>* Qdown = new std::complex<FPType>[Nxw];
std::complex<FPType>* funup = new std::complex<FPType>[Nxw];
std::complex<FPType>* fundown = new std::complex<FPType>[Nxw];
memset(funup, 0, Nxw*sizeof(std::complex<FPType>));
memset(fundown, 0, Nxw*sizeof(std::complex<FPType>));
// double start = omp_get_wtime();
// std::cout<<"n = "<<n<<std::endl;
for (uint k = 0; k < params.Nx; ++k)
{
for (uint m = 0; m < Ny; ++m)
{
std::complex<FPType> facup = conj(updata [k*Ny + m].u) * updata[k*Ny + m].evec[n];
std::complex<FPType> facdown = conj(downdata[k*Ny + m].u) * downdata[k*Ny + m].evec[n];
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{//the layout of the frequencies is now (w_n, -w_n) , that is every negative frequency is stored next to its positive counterpart.
//Hopefully this gives a better data locality
funup[2*omega_idx*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, -omega[omega_idx].omega);
funup[(2*omega_idx + 1)*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, omega[omega_idx].omega);
fundown[2*omega_idx*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, -omega[omega_idx].omega);
fundown[(2*omega_idx + 1)*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, omega[omega_idx].omega);
}
}
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
for (uint w = 0; w < Nw; ++w)
{
fourier1(reinterpret_cast<FPType*>(funup + w*params.Nx), params.Nx, 1);
fourier1(reinterpret_cast<FPType*>(fundown + w*params.Nx), params.Nx, 1);
//funup as well as fundown now contain Q(r, i omega) for a particular value of the orbital n
for (uint r = 0; r < params.Nx; ++r)
{
funup[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
fundown[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
(Qup + r*Nw)[w] = funup[w*params.Nx + r];//norm(funup[w*params.Nx + r]);
(Qdown + r*Nw)[w] = fundown[w*params.Nx + r];//norm(fundown[w*params.Nx + r]);
}
}
for(uint r = 0; r < params.Nx; ++r)
for(uint w = 0; w < Nw/2; ++w)
{//combine Q(w) with Q(-w): Q[2w] <- Q(w)*conj(Q(-w)), Q[2w+1] <- Q(-w)*conj(Q(w))
std::complex<FPType> temp = conj((Qup + r*Nw)[2*w]);
(Qup + r*Nw)[2*w] *= conj((Qup + r*Nw)[2*w+1]);
(Qup + r*Nw)[2*w+1] *= temp;
temp = conj((Qdown + r*Nw)[2*w]);
(Qdown + r*Nw)[2*w] *= conj((Qdown + r*Nw)[2*w+1]);
(Qdown + r*Nw)[2*w+1] *= temp;
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
for (uint r = 0; r < params.Nx; ++r)
{
std::complex<FPType> expt = 1;
{//an empty block for the unmixeddata
std::complex<FPType> tempup = 0;
std::complex<FPType> tempdown = 0;
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{
std::complex<FPType> gupp = gup[2*omega_idx];
std::complex<FPType> gupm = gup[2*omega_idx + 1];
std::complex<FPType> gdownp = gdown[2*omega_idx];
std::complex<FPType> gdownm = gdown[2*omega_idx + 1];
tempup += (Qup + r * Nw)[2*omega_idx] * gupp + (Qup + r * Nw)[2*omega_idx + 1] * gupm;
tempdown += (Qdown + r * Nw)[2*omega_idx] * gdownp + (Qdown + r * Nw)[2*omega_idx + 1] * gdownm;
}
(unmixeddata + n*params.Nx)[r] += tempup*normierungunmixed;
(unmixeddata + sites + n*params.Nx)[r] += tempdown*normierungunmixed;
}
for(uint t = 0; t < Nt; ++t)// here is the final Matsubara transform
{
std::complex<FPType> tempupmixed = 0;
std::complex<FPType> tempdownmixed = 0;
std::complex<FPType> expiom = expt;
std::complex<FPType> expfac = expiom*expiom;
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{
std::complex<FPType> gupp = gup[2*omega_idx];
std::complex<FPType> gupm = gup[2*omega_idx + 1];
std::complex<FPType> gdownp = gdown[2*omega_idx];
std::complex<FPType> gdownm = gdown[2*omega_idx + 1];
std::complex<FPType> cexpiom = conj(expiom);
tempupmixed += cexpiom * funup[(2*omega_idx)*params.Nx + r] * gupp + expiom * funup[(2*omega_idx + 1)*params.Nx + r] * gupm;
tempdownmixed += cexpiom * fundown[(2*omega_idx)*params.Nx + r] * gdownp + expiom * fundown[(2*omega_idx + 1)*params.Nx + r] * gdownm;
expiom *= expfac;
}
(mixeddata + n*params.Nx*Nt + r*Nt)[t] = tempupmixed*normierungmixed;
(mixeddata + Nxyt + n*params.Nx*Nt + r*Nt)[t] = tempdownmixed*normierungmixed;
expt *= expNt;
}
// if (r > 3 )exit(-1);
// test<<"&"<<std::endl;
}
// std::cout<<"Initialization took "<<omp_get_wtime() - start<<" seconds"<<std::endl;
delete [] funup;
delete [] fundown;
delete [] Qup;
delete [] Qdown;
}
#ifdef _OPENMP
std::cout<<"Initialization took "<<omp_get_wtime() - start2<<" seconds"<<std::endl;
#endif
delete [] gup;
delete [] gdown;
delete [] omega;
std::cout<<"KondoCloud_Z done"<<std::endl;
}
///Register with the dry run the Green's-function arguments that evaluate() will later request
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
contrib_dry(UP, UP, func);
contrib_dry(UP, DOWN, func);
contrib_dry(DOWN, UP, func);
contrib_dry(DOWN, DOWN, func);
}
/**
this determines the KondoCloud for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& functionpoints;///< NOTE(review): reference member — the referenced Parameters object must outlive this observable
const FPType beta;///< inverse temperature
//NOTE(review): mixeddata/unmixeddata are never freed — there is no destructor. Adding one is only
//safe if instances are never copied (a shallow copy would double-free), so it is left as-is here.
std::complex<FPType>* mixeddata;///< here we store <d^+ gamma>.
std::complex<FPType>* unmixeddata;///< here we store <gamma^+ gamma>
const uint Ny;///< number of bath orbitals, params.Nb*2
const uint Nx;///< spatial extent
const uint Nt;///< number of tabulated tau slices per (n, r) pair
const uint Nw;///< number of tabulated Matsubara frequencies
const FPType delta_beta;///< tau spacing of the tabulated data, beta/(Nt-1)
const uint32_t sites;///< Ny*Nx, size of one spin sector of unmixeddata
const uint Nxyt;///< sites*Nt, size of one spin sector of mixeddata
/**
Interpolated look-up of the tabulated mixed correlator <d^+ gamma> at time difference tau1 - tau2.
Off-diagonal spin combinations return 0 (only valid in the spin-symmetric case, see FIXME below).
*/
std::complex<FPType> accessmixed(uint r, uint n, SPINS spin1, FPType tau1 , SPINS spin2, FPType tau2) const
{
FPType delta_tau = tau1 - tau2;
FPType sign = 1.0;
std::complex<FPType>* dataptr = mixeddata;
if(spin1 != spin2) return 0.0;//FIXME!!!!!!!!!!!!!!!!!!!! only the case for the spin symmetric case!
if (spin1 == DOWN) dataptr += Nxyt;//the DOWN sector is stored contiguously behind the UP sector
dataptr = dataptr + n*Nx*Nt + r*Nt;
//if(std::abs(delta_tau) < std::numeric_limits<FPType>::epsilon())
if(fpequal(tau1, tau2))
return std::complex<FPType>(dataptr[0]);
if(delta_tau < 0)
{//fermionic antiperiodicity: G(tau) = -G(tau + beta) for tau < 0
sign = -1.0;
delta_tau += beta;
}
FPType fptau_idx0;
FPType rem = std::modf(delta_tau/delta_beta, &fptau_idx0);//round to the smaller index and determine how far we're off.
std::size_t tau_idx0 = lround(fptau_idx0);
return std::complex<FPType>(lerp(rem, dataptr[tau_idx0], dataptr[tau_idx0 + 1]))*sign;
}
///Look-up of the equal-time unmixed bath correlator <gamma^+ gamma> for orbital n at site r
std::complex<FPType> accessunmixed(uint r, uint n, SPINS spin) const
{
std::complex<FPType>* dataptr = unmixeddata;
if (spin == DOWN) dataptr += sites;//the DOWN sector is stored contiguously behind the UP sector
dataptr = dataptr + n*Nx + r;
return dataptr[0];
}
///Dry-run helper: registers the single equal-time dot Green's-function argument that depends on sigmaprime
void contrib_dry(SPINS sigma, SPINS sigmaprime, DryRun<typename Configuration::value_type, GFRetVal>& func) const
{//yes, the only thing that we don't derive from tables only depends on sigmaprime
typename GreensFunction::Vertex v1;
v1.spin = sigmaprime;
v1.tau = 0;
func(v1, v1);
}
/**
Fully dressed dot Green's function <d_{spin1}^+(tau1) d_{spin2}(tau2)> for the given configuration:
the bare propagator minus the vertex corrections from the configuration's inverse matrix.
Loop-invariant propagator evaluations are hoisted out of the inner loop.
*/
GFRetVal dotghelper(FPType tau1, SPINS spin1, FPType tau2, SPINS spin2, const Configuration& config)
{
// auto gdot = [](FPType tau1, SPINS spin1, FPType tau2, SPINS spin2){return GreensFunction::eval(typename GreensFunction::Vertex(tau1, spin1), typename GreensFunction::Vertex(tau2, spin2));};
struct GDot{GFRetVal operator()
(FPType tau1, SPINS spin1, FPType tau2, SPINS spin2){return GreensFunction::eval(typename GreensFunction::Vertex(tau1, spin1), typename GreensFunction::Vertex(tau2, spin2));}
} gdot;
GFRetVal retval = gdot(tau1, spin1, tau2, spin2);
for(uint r = 0; r < config.size(); ++r)
{
FPType taur = config[r].tau;
GFRetVal gtaur_tau1_up = gdot(taur, UP, tau1, spin1);
GFRetVal gtaur_tau1_down = gdot(taur, DOWN, tau1, spin1);
for(uint s = 0; s < config.size(); ++s)
{
FPType taus = config[s].tau;
GFRetVal gtau2_taus_up = gdot(tau2, spin2, taus, UP);
GFRetVal gtau2_taus_down = gdot(tau2, spin2, taus, DOWN);
retval -= gtaur_tau1_up * config.matcont.mat(2*r, 2*s) * gtau2_taus_up;
retval -= gtaur_tau1_down * config.matcont.mat(2*r+1, 2*s) * gtau2_taus_up;
retval -= gtaur_tau1_up * config.matcont.mat(2*r, 2*s+1) * gtau2_taus_down;
retval -= gtaur_tau1_down * config.matcont.mat(2*r+1, 2*s+1) * gtau2_taus_down;
}
}
return retval;
}
};
/**
Measures <S^z_d S^z_c(r)> for the current configuration: the dot spin-z contribution
(dotcontrib) times the bath density difference, minus the mixed-correlator products,
fully Wick-contracted with the configuration's inverse matrix. The dressed dot
propagators to/from every vertex time are precomputed into dotgf before the
position loop; layout: dotgf[4*i + c] holds <d^+(0) d(tau_i)> for the four spin
combinations c, and dotgf[4*(i+size) + c] the time-reversed counterparts.
@param config the current configuration of vertices
@param dowick the Wick-theorem evaluator used for the pure dot contribution
*/
template <class Config, class GreensFunction, SPINS Spin>
void KondoCloud_Z<Config, GreensFunction, Spin>::evaluate(const Configuration& config, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
ObservableType func(sites);
typename GreensFunction::Vertex v1(0.0, UP);
typename GreensFunction::Vertex v2(0.0, DOWN);
GFRetVal dotcontrib = dowick(v1, v1) - dowick(v2,v2);//<n_up - n_down> of the dot at tau == 0
GFRetVal *const dotgf = new GFRetVal[config.size()*2*2*2];
for(uint i = 0; i < config.size(); ++i)//here we sum up < d^+ d(t) >
{
FPType taui = config[i].tau;
dotgf[4*i + 0] = dotghelper(0, UP, taui, UP, config);
dotgf[4*i + 1] = dotghelper(0, UP, taui, DOWN, config);
dotgf[4*i + 2] = dotghelper(0, DOWN, taui, UP, config);
dotgf[4*i + 3] = dotghelper(0, DOWN, taui, DOWN, config);
dotgf[4*(i+config.size()) + 0] = dotghelper(taui, UP, 0, UP, config);
dotgf[4*(i+config.size()) + 1] = dotghelper(taui, UP, 0, DOWN, config);
dotgf[4*(i+config.size()) + 2] = dotghelper(taui, DOWN, 0, UP, config);
dotgf[4*(i+config.size()) + 3] = dotghelper(taui, DOWN, 0, DOWN, config);
}
// std::ofstream kc("kc.txt");
#pragma omp parallel for
for (unsigned int k = 0; k < func.size(); ++k)
{
uint n = k / Nx;//decode the flat index k into orbital n and site r
uint r = k % Nx;
//start from the tabulated (non-interacting) values; vertex corrections are subtracted below
GFRetVal unmixedup = accessunmixed(r, n, UP);
GFRetVal unmixeddown = accessunmixed(r, n, DOWN);
GFRetVal mixedupup = accessmixed(r, n, UP, 0.0, UP, 0.0);
GFRetVal mixedupdown = accessmixed(r, n, UP, 0.0, DOWN, 0.0);
GFRetVal mixeddownup = accessmixed(r, n, DOWN, 0.0, UP, 0.0);
GFRetVal mixeddowndown = accessmixed(r, n, DOWN, 0.0, DOWN, 0.0);
GFRetVal mixedupupconj = conj(accessmixed(r, n, UP, 0.0, UP, 0.0));
GFRetVal mixedupdownconj = conj(accessmixed(r, n, UP, 0.0, DOWN, 0.0));
GFRetVal mixeddownupconj = conj(accessmixed(r, n, DOWN, 0.0, UP, 0.0));
GFRetVal mixeddowndownconj = conj(accessmixed(r, n, DOWN, 0.0, DOWN, 0.0));
for(uint q = 0; q < config.size(); ++q)
{
FPType tauq = config[q].tau;
GFRetVal gmixed_rn_tauq_UP_UP = accessmixed(r, n, UP, tauq, UP, 0.0);
GFRetVal gmixed_rn_tauq_DOWN_UP = accessmixed(r, n, DOWN, tauq, UP, 0.0);
GFRetVal gmixed_rn_tauq_UP_DOWN = accessmixed(r, n, UP, tauq, DOWN, 0.0);
GFRetVal gmixed_rn_tauq_DOWN_DOWN = accessmixed(r, n, DOWN, tauq, DOWN, 0.0);
GFRetVal dotq0 = dotgf[4*(q+config.size()) + 0];
GFRetVal dotq1 = dotgf[4*(q+config.size()) + 1];
GFRetVal dotq2 = dotgf[4*(q+config.size()) + 2];
GFRetVal dotq3 = dotgf[4*(q+config.size()) + 3];
for(uint s = 0; s < config.size(); ++s)
{
FPType taus = config[s].tau;
GFRetVal gmixed_rn_taus_UP_UP = conj(accessmixed(r, n, UP, taus, UP, 0.0));
GFRetVal gmixed_rn_taus_UP_DOWN = conj(accessmixed(r, n, UP, taus, DOWN, 0.0));
GFRetVal gmixed_rn_taus_DOWN_UP = conj(accessmixed(r, n, DOWN, taus, UP, 0.0));
GFRetVal gmixed_rn_taus_DOWN_DOWN = conj(accessmixed(r, n, DOWN, taus, DOWN, 0.0));
GFRetVal matqs = config.matcont.mat(2*q, 2*s);
GFRetVal matqsp = config.matcont.mat(2*q, 2*s + 1);
GFRetVal matqps = config.matcont.mat(2*q + 1, 2*s);
GFRetVal matqpsp = config.matcont.mat(2*q + 1, 2*s + 1);
GFRetVal dots0 = dotgf[4*s+0];
GFRetVal dots1 = dotgf[4*s+1];
GFRetVal dots2 = dotgf[4*s+2];
GFRetVal dots3 = dotgf[4*s+3];
unmixedup -= (
matqs * gmixed_rn_tauq_UP_UP * gmixed_rn_taus_UP_UP
+matqsp* gmixed_rn_tauq_UP_UP * gmixed_rn_taus_UP_DOWN
+matqps* gmixed_rn_tauq_DOWN_UP* gmixed_rn_taus_UP_UP
+matqpsp *gmixed_rn_tauq_DOWN_UP* gmixed_rn_taus_UP_DOWN
);
unmixeddown -= (
matqs * gmixed_rn_tauq_UP_DOWN * gmixed_rn_taus_DOWN_UP
+matqsp* gmixed_rn_tauq_UP_DOWN * gmixed_rn_taus_DOWN_DOWN
+matqps* gmixed_rn_tauq_DOWN_DOWN* gmixed_rn_taus_DOWN_UP
+matqpsp *gmixed_rn_tauq_DOWN_DOWN* gmixed_rn_taus_DOWN_DOWN
);
mixedupup -= (
matqs * gmixed_rn_tauq_UP_UP * dots0
+matqsp* gmixed_rn_tauq_UP_UP * dots1
+matqps* gmixed_rn_tauq_DOWN_UP* dots0
+matqpsp *gmixed_rn_tauq_DOWN_UP* dots1
);
mixedupdown -= (
matqs * gmixed_rn_tauq_UP_DOWN * dots0
+matqsp* gmixed_rn_tauq_UP_DOWN * dots1
+matqps* gmixed_rn_tauq_DOWN_DOWN* dots0
+matqpsp *gmixed_rn_tauq_DOWN_DOWN* dots1
);
mixeddownup -= (
matqs * gmixed_rn_tauq_UP_UP * dots2
+matqsp* gmixed_rn_tauq_UP_UP * dots3
+matqps* gmixed_rn_tauq_DOWN_UP* dots2
+matqpsp *gmixed_rn_tauq_DOWN_UP* dots3
);
mixeddowndown -= (
matqs * gmixed_rn_tauq_UP_DOWN * dots2
+matqsp* gmixed_rn_tauq_UP_DOWN * dots3
+matqps* gmixed_rn_tauq_DOWN_DOWN* dots2
+matqpsp *gmixed_rn_tauq_DOWN_DOWN* dots3
);
GFRetVal gmixed_rn_UP_UP_taus = conj(accessmixed(r, n, UP, 0.0, UP, taus));
GFRetVal gmixed_rn_UP_DOWN_taus = conj(accessmixed(r, n, UP, 0.0, DOWN, taus));
mixedupupconj -= (
matqs * gmixed_rn_UP_UP_taus * dotq0
+matqsp * gmixed_rn_UP_DOWN_taus * dotq0
+matqps * gmixed_rn_UP_UP_taus* dotq2
+matqpsp * gmixed_rn_UP_DOWN_taus* dotq2
);
mixedupdownconj -= (
matqs * gmixed_rn_UP_UP_taus * dotq1
+matqsp * gmixed_rn_UP_DOWN_taus * dotq1
+matqps * gmixed_rn_UP_UP_taus* dotq3
+matqpsp * gmixed_rn_UP_DOWN_taus* dotq3
);
GFRetVal gmixed_rn_DOWN_DOWN_taus = conj(accessmixed(r, n, DOWN, 0.0, DOWN, taus));
GFRetVal gmixed_rn_DOWN_UP_taus = conj(accessmixed(r, n, DOWN, 0.0, UP, taus));
mixeddownupconj -= (
matqs * gmixed_rn_DOWN_UP_taus * dotq0
+matqsp * gmixed_rn_DOWN_DOWN_taus * dotq0
+matqps * gmixed_rn_DOWN_UP_taus* dotq2
+matqpsp * gmixed_rn_DOWN_DOWN_taus* dotq2
);
mixeddowndownconj -= (
matqs * gmixed_rn_DOWN_UP_taus * dotq1
+matqsp * gmixed_rn_DOWN_DOWN_taus * dotq1
+matqps * gmixed_rn_DOWN_UP_taus* dotq3
+matqpsp * gmixed_rn_DOWN_DOWN_taus* dotq3
);
}
}
//add to measurement; note: the spin-off-diagonal products are deliberately commented out below
func[k] = (dotcontrib * (unmixedup - unmixeddown) - mixedupup*mixedupupconj
/*+ mixedupdown*mixedupdownconj + mixeddownup*mixeddownupconj*/
- mixeddowndown*mixeddowndownconj)* config.phase/4.0;
//kc<<dotcontrib <<" "<< (unmixedup - unmixeddown)<<" "<<mixedupup<<" "<<mixedupupconj<<" "<< mixedupdown<<" "<<mixedupdownconj <<" "<< mixeddownup<<" "<<mixeddownupconj <<" "<< mixeddowndown<<" "<<mixeddowndownconj<<std::endl;
}
// exit(-1);
this->add_bin(func);
delete [] dotgf;
return;
}
/**
A class for measuring the Kondo-cloud as evidenced by the correlation function
<S^x_d S^x_c (r)>
where d denotes the dot electron and c the bath electron. All this as a function of distance r from the dot.
*/
template <class Config, class GreensFunction, SPINS Spin>
class KondoCloud_X : public Network_Cache<Config, std::valarray<typename Config::SignType> >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef GreensFunction GF;
typedef std::valarray<GFRetVal> Function;
typedef std::valarray<GFRetVal> ObservableType;///<the Kondo Cloud has no time-dependence. it depends only on the position
/**
The Constructor for the KondoCloud measured along the X- direction. It pre-tabulates the bath
correlators needed by evaluate(): the equal-time unmixed correlator <gamma^+ gamma> for every
(orbital n, site r) pair and the mixed correlator <d^+ gamma> on an Nt-point imaginary-time grid.
The analytically known part of the unmixed correlator is written out first, then the remaining
Matsubara sums are accumulated on top.
@param n the communicator object handed to the Network_Cache base
@param params the simulation parameters (beta, Nx, Nb, functionpoints, ...)
*/
KondoCloud_X(typename Config::Comm& n, const Parameters& params)/* throw()*/ : Network_Cache<Config, ObservableType>(n, "KondoCloud_X"), functionpoints(params.functionpoints), beta(params.beta), Ny(params.Nb*2),
Nx(params.Nx),
Nt(300 + 1),
Nw(4*Nt),
delta_beta(params.beta/static_cast<FPType>(Nt-1)),
sites(Ny*params.Nx),
Nxyt(sites*Nt)
{
#ifdef _OPENMP
double start2 = omp_get_wtime();
#endif
std::cout<<"Creating KondoCloud_X"<<std::endl;
const GOmegaData<FPType> *const updata = GF::gomegaup->data;
const GOmegaData<FPType> *const downdata = GF::gomegadown->data;
//let's set up the Q(r, n, omega_n) data
Omegadata<FPType>* omega = new Omegadata<FPType>[Nw/2];
std::complex<FPType>* gup = new std::complex<FPType>[Nw];
std::complex<FPType>* gdown = new std::complex<FPType>[Nw];
for (int t = 0; t < static_cast<int>(Nw/2); ++t)
{//frequency layout: every negative frequency is stored next to its positive counterpart
gup[2*t] = conj((*GF::gomegaup)(t));
gup[2*t + 1] = conj((*GF::gomegaup)( -t - 1));
gdown[2*t] = conj((*GF::gomegadown)(t));
gdown[2*t + 1] = conj((*GF::gomegadown)( -t - 1));
omega[t].omega = M_PI/params.beta*(2*t + 1);//fermionic Matsubara frequency
omega[t].omegasq = omega[t].omega*omega[t].omega;
omega[t].invomega = 1.0/omega[t].omega;
}
FPType pref = GF::gethybridization() / std::sqrt(params.Nx);
unmixeddata = new std::complex<FPType>[2*sites];
//Since we have a simple, analytical expression for the tau dependence of the offset of the unmixed greensfunctions
//we write that one out first to the unmixeddata array
FPType* cosharrayup = new FPType[sites];
FPType* cosharraydown = new FPType[sites];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
cosharrayup[k*Ny + m] = 1.0/std::cosh(params.beta*updata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
cosharraydown[k*Ny + m] = 1.0/std::cosh(params.beta*downdata[k*Ny + m].lambda/2.0)/2.0/static_cast<FPType>(params.Nx);
}
#pragma omp parallel for
for(uint n = 0; n < Ny; ++n)
{
FPType* tempup = new FPType[sites];
FPType* tempdown = new FPType[sites];
for(uint k = 0; k < params.Nx; ++k)
for(uint m = 0; m < Ny; ++m)
{
tempup[k*Ny + m] = cosharrayup[k*Ny + m] * norm(updata[k*Ny + /*n*/m].evec[/*m*/n]);
tempdown[k*Ny + m] = cosharraydown[k*Ny + m] * norm(downdata[k*Ny + /*n*/m].evec[/*m*/n]);
}
FPType ftup = 0.0;
FPType ftdown = 0.0;
FPType arg = - params.beta/2.0;//from that we only need a tau == 0 quantity
FPType cup = 0.0;
FPType cdown = 0.0;
for(uint k = 0; k < params.Nx; ++k)//being careful we employ Kahan summation
for(uint m = 0; m < Ny; ++m)
{
FPType argup = tempup[k*Ny + m] * std::exp(updata[k*Ny + m].lambda*arg);
FPType argdown = tempdown[k*Ny + m] * std::exp(downdata[k*Ny + m].lambda*arg);
FPType y = argup - cup;
FPType t = ftup + y;
cup = (t - ftup) - y;
ftup = t;
y = argdown - cdown;
t = ftdown + y;
cdown = (t - ftdown) - y;
ftdown = t;
}
for(uint r = 0; r < params.Nx; ++r)
{
//store at full FPType precision: the former static_cast<float> here was a leftover
//from a float-storage variant and needlessly truncated double-precision data
(unmixeddata + n*params.Nx)[r] = ftup;
(unmixeddata + sites + n*params.Nx)[r] = ftdown;
}
delete [] tempup;
delete [] tempdown;
}
delete [] cosharrayup;
delete [] cosharraydown;
const unsigned int Nxw = Nw*params.Nx;
FPType normierungunmixed = 1.0/params.beta / params.Nx;
FPType normierungmixed = 1.0/params.beta / std::sqrt(params.Nx);
const std::complex<FPType> expNt = std::exp(std::complex<FPType>(0.0, -M_PI/(Nt-1)));
mixeddata = new std::complex<FPType>[2*Nxyt];
#pragma omp parallel for
for (uint n = 0; n < Ny; ++n)
{
std::complex<FPType>* Qup = new std::complex<FPType>[Nxw];
std::complex<FPType>* Qdown = new std::complex<FPType>[Nxw];
std::complex<FPType>* funup = new std::complex<FPType>[Nxw];
std::complex<FPType>* fundown = new std::complex<FPType>[Nxw];
memset(funup, 0, Nxw*sizeof(std::complex<FPType>));
memset(fundown, 0, Nxw*sizeof(std::complex<FPType>));
// double start = omp_get_wtime();
// std::cout<<"n = "<<n<<std::endl;
for (uint k = 0; k < params.Nx; ++k)
{
for (uint m = 0; m < Ny; ++m)
{
std::complex<FPType> facup = conj(updata [k*Ny + m].u) * updata[k*Ny + m].evec[n];
std::complex<FPType> facdown = conj(downdata[k*Ny + m].u) * downdata[k*Ny + m].evec[n];
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{//the layout of the frequencies is now (w_n, -w_n) , that is every negative frequency is stored next to its positive counterpart.
//Hopefully this gives a better data locality
funup[2*omega_idx*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, -omega[omega_idx].omega);
funup[(2*omega_idx + 1)*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, omega[omega_idx].omega);
fundown[2*omega_idx*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, -omega[omega_idx].omega);
fundown[(2*omega_idx + 1)*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, omega[omega_idx].omega);
}
}
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
for (uint w = 0; w < Nw; ++w)
{
fourier1(reinterpret_cast<FPType*>(funup + w*params.Nx), params.Nx, 1);
fourier1(reinterpret_cast<FPType*>(fundown + w*params.Nx), params.Nx, 1);
//funup as well as fundown now contain Q(r, i omega) for a particular value of the orbital n
for (uint r = 0; r < params.Nx; ++r)
{
funup[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
fundown[w*params.Nx + r] *= pref; // == Q. pref == V/sqrt(L)
(Qup + r*Nw)[w] = funup[w*params.Nx + r];//norm(funup[w*params.Nx + r]);
(Qdown + r*Nw)[w] = fundown[w*params.Nx + r];//norm(fundown[w*params.Nx + r]);
}
}
for(uint r = 0; r < params.Nx; ++r)
for(uint w = 0; w < Nw/2; ++w)
{//combine Q(w) with Q(-w): Q[2w] <- Q(w)*conj(Q(-w)), Q[2w+1] <- Q(-w)*conj(Q(w))
std::complex<FPType> temp = conj((Qup + r*Nw)[2*w]);
(Qup + r*Nw)[2*w] *= conj((Qup + r*Nw)[2*w+1]);
(Qup + r*Nw)[2*w+1] *= temp;
temp = conj((Qdown + r*Nw)[2*w]);
(Qdown + r*Nw)[2*w] *= conj((Qdown + r*Nw)[2*w+1]);
(Qdown + r*Nw)[2*w+1] *= temp;
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
for (uint r = 0; r < params.Nx; ++r)
{
std::complex<FPType> expt = 1;
{//an empty block for the unmixeddata
std::complex<FPType> tempup = 0;
std::complex<FPType> tempdown = 0;
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{
std::complex<FPType> gupp = gup[2*omega_idx];
std::complex<FPType> gupm = gup[2*omega_idx + 1];
std::complex<FPType> gdownp = gdown[2*omega_idx];
std::complex<FPType> gdownm = gdown[2*omega_idx + 1];
tempup += (Qup + r * Nw)[2*omega_idx] * gupp + (Qup + r * Nw)[2*omega_idx + 1] * gupm;
tempdown += (Qdown + r * Nw)[2*omega_idx] * gdownp + (Qdown + r * Nw)[2*omega_idx + 1] * gdownm;
}
(unmixeddata + n*params.Nx)[r] += tempup*normierungunmixed;
(unmixeddata + sites + n*params.Nx)[r] += tempdown*normierungunmixed;
}
for(uint t = 0; t < Nt; ++t)// here is the final Matsubara transform
{
std::complex<FPType> tempupmixed = 0;
std::complex<FPType> tempdownmixed = 0;
std::complex<FPType> expiom = expt;
std::complex<FPType> expfac = expiom*expiom;
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{
std::complex<FPType> gupp = gup[2*omega_idx];
std::complex<FPType> gupm = gup[2*omega_idx + 1];
std::complex<FPType> gdownp = gdown[2*omega_idx];
std::complex<FPType> gdownm = gdown[2*omega_idx + 1];
std::complex<FPType> cexpiom = conj(expiom);
tempupmixed += cexpiom * funup[(2*omega_idx)*params.Nx + r] * gupp + expiom * funup[(2*omega_idx + 1)*params.Nx + r] * gupm;
tempdownmixed += cexpiom * fundown[(2*omega_idx)*params.Nx + r] * gdownp + expiom * fundown[(2*omega_idx + 1)*params.Nx + r] * gdownm;
expiom *= expfac;
}
(mixeddata + n*params.Nx*Nt + r*Nt)[t] = tempupmixed*normierungmixed;
(mixeddata + Nxyt + n*params.Nx*Nt + r*Nt)[t] = tempdownmixed*normierungmixed;
expt *= expNt;
}
// if (r > 3 )exit(-1);
// test<<"&"<<std::endl;
}
// std::cout<<"Initialization took "<<omp_get_wtime() - start<<" seconds"<<std::endl;
delete [] funup;
delete [] fundown;
delete [] Qup;
delete [] Qdown;
}
#ifdef _OPENMP
std::cout<<"Initialization took "<<omp_get_wtime() - start2<<" seconds"<<std::endl;
#endif
delete [] gup;
delete [] gdown;
delete [] omega;
std::cout<<"KondoCloud_X done"<<std::endl;
}
///Register with the dry run the Green's-function arguments that evaluate() will later request
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
contrib_dry(UP, UP, func);
contrib_dry(UP, DOWN, func);
contrib_dry(DOWN, UP, func);
contrib_dry(DOWN, DOWN, func);
}
/**
this determines the KondoCloud_X for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const uint32_t& functionpoints;///< NOTE(review): reference member — the referenced Parameters object must outlive this observable
const FPType beta;///< inverse temperature
//NOTE(review): mixeddata/unmixeddata are never freed — there is no destructor. Adding one is only
//safe if instances are never copied (a shallow copy would double-free), so it is left as-is here.
std::complex<FPType>* mixeddata;///< here we store <d^+ gamma>.
std::complex<FPType>* unmixeddata;///< here we store <gamma^+ gamma>
const uint Ny;///< number of bath orbitals, params.Nb*2
const uint Nx;///< spatial extent
const uint Nt;///< number of tabulated tau slices per (n, r) pair
const uint Nw;///< number of tabulated Matsubara frequencies
const FPType delta_beta;///< tau spacing of the tabulated data, beta/(Nt-1)
const uint32_t sites;///< Ny*Nx, size of one spin sector of unmixeddata
const uint Nxyt;///< sites*Nt, size of one spin sector of mixeddata
/**
Interpolated look-up of the tabulated mixed correlator <d^+ gamma> at time difference tau1 - tau2.
Off-diagonal spin combinations return 0 (only valid in the spin-symmetric case, see FIXME below).
*/
std::complex<FPType> accessmixed(uint r, uint n, SPINS spin1, FPType tau1 , SPINS spin2, FPType tau2) const
{
FPType delta_tau = tau1 - tau2;
FPType sign = 1.0;
std::complex<FPType>* dataptr = mixeddata;
if(spin1 != spin2) return 0.0;//FIXME!!!!!!!!!!!!!!!!!!!! only the case for the spin symmetric case!
if (spin1 == DOWN) dataptr += Nxyt;//the DOWN sector is stored contiguously behind the UP sector
dataptr = dataptr + n*Nx*Nt + r*Nt;
// if(std::abs(delta_tau) < std::numeric_limits<FPType>::epsilon())
if(fpequal(tau1, tau2))
return std::complex<FPType>(dataptr[0]);
if(delta_tau < 0)
{//fermionic antiperiodicity: G(tau) = -G(tau + beta) for tau < 0
sign = -1.0;
delta_tau += beta;
}
FPType fptau_idx0;
FPType rem = std::modf(delta_tau/delta_beta, &fptau_idx0);//round to the smaller index and determine how far we're off.
std::size_t tau_idx0 = lround(fptau_idx0);
return std::complex<FPType>(lerp(rem, dataptr[tau_idx0], dataptr[tau_idx0 + 1]))*sign;
}
///Look-up of the equal-time unmixed bath correlator <gamma^+ gamma> for orbital n at site r
std::complex<FPType> accessunmixed(uint r, uint n, SPINS spin) const
{
std::complex<FPType>* dataptr = unmixeddata;
if (spin == DOWN) dataptr += sites;//the DOWN sector is stored contiguously behind the UP sector
dataptr = dataptr + n*Nx + r;
return dataptr[0];
}
///Dry-run helper: registers the single equal-time dot Green's-function argument that depends on sigmaprime.
///Made const for consistency with KondoCloud_Z::contrib_dry.
void contrib_dry(SPINS sigma, SPINS sigmaprime, DryRun<typename Configuration::value_type, GFRetVal>& func) const
{//yes, the only thing that we don't derive from tables only depends on sigmaprime
typename GreensFunction::Vertex v1;
v1.spin = sigmaprime;
v1.tau = 0;
func(v1, v1);
}
/**
Fully dressed dot Green's function <d_{spin1}^+(tau1) d_{spin2}(tau2)> for the given configuration:
the bare propagator minus the vertex corrections from the configuration's inverse matrix.
Fixes: the configuration is now taken by const reference (it was passed by value, deep-copying it
on every call), and the loop-invariant propagator evaluations are hoisted out of the inner loop,
exactly as in KondoCloud_Z::dotghelper; the order of the floating-point operations is unchanged.
*/
GFRetVal dotghelper(FPType tau1, SPINS spin1, FPType tau2, SPINS spin2, const Configuration& config)
{
// auto gdot = [](FPType tau1, SPINS spin1, FPType tau2, SPINS spin2){return GreensFunction::eval(typename GreensFunction::Vertex(tau1, spin1), typename GreensFunction::Vertex(tau2, spin2));};
struct GDot{GFRetVal operator()
(FPType tau1, SPINS spin1, FPType tau2, SPINS spin2){return GreensFunction::eval(typename GreensFunction::Vertex(tau1, spin1), typename GreensFunction::Vertex(tau2, spin2));}
} gdot;
GFRetVal retval = gdot(tau1, spin1, tau2, spin2);
for(uint r = 0; r < config.size(); ++r)
{
FPType taur = config[r].tau;
GFRetVal gtaur_tau1_up = gdot(taur, UP, tau1, spin1);
GFRetVal gtaur_tau1_down = gdot(taur, DOWN, tau1, spin1);
for(uint s = 0; s < config.size(); ++s)
{
FPType taus = config[s].tau;
GFRetVal gtau2_taus_up = gdot(tau2, spin2, taus, UP);
GFRetVal gtau2_taus_down = gdot(tau2, spin2, taus, DOWN);
retval -= gtaur_tau1_up * config.matcont.mat(2*r, 2*s) * gtau2_taus_up;
retval -= gtaur_tau1_down * config.matcont.mat(2*r+1, 2*s) * gtau2_taus_up;
retval -= gtaur_tau1_up * config.matcont.mat(2*r, 2*s+1) * gtau2_taus_down;
retval -= gtaur_tau1_down * config.matcont.mat(2*r+1, 2*s+1) * gtau2_taus_down;
}
}
return retval;
}
};
/**
Measurement kernel of the KondoCloud_X observable. Builds a table of
dressed dot Green's functions for all configuration vertices, then combines
them with the mixed <dc> propagators for every lattice point k and adds the
result to the measurement bin.
@param config the current Monte-Carlo configuration
@param dowick functor delivering the Wick-contracted Green's functions
*/
template <class Config, class GreensFunction, SPINS Spin>
void KondoCloud_X<Config, GreensFunction, Spin>::evaluate(const Configuration& config, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType func(sites);
    typename GreensFunction::Vertex v1(0.0, UP);
    typename GreensFunction::Vertex v2(0.0, DOWN);
    // NOTE(review): dotcontrib is not used in the measurement below. The
    // dowick calls are kept anyway: DoWick results are presumably served in
    // the order registered by the dry run -- confirm before removing them.
    GFRetVal dotcontrib = dowick(v1, v1) - dowick(v2, v2);
    (void)dotcontrib;
    // Table of dressed dot Green's functions, one group of four spin
    // combinations (UP/UP, UP/DOWN, DOWN/UP, DOWN/DOWN) per vertex:
    //   dotgf[4*i + c]               for times (0, config[i].tau)
    //   dotgf[4*(i + size) + c]      for times (config[i].tau, 0)
    // std::valarray replaces the former raw new[]/delete[] pair, which would
    // have leaked if anything below threw.
    std::valarray<GFRetVal> dotgf(config.size()*2*2*2);
    for(uint i = 0; i < config.size(); ++i)
    {
        dotgf[4*i + 0] = dotghelper(0, UP, config[i].tau, UP, config);
        dotgf[4*i + 1] = dotghelper(0, UP, config[i].tau, DOWN, config);
        dotgf[4*i + 2] = dotghelper(0, DOWN, config[i].tau, UP, config);
        dotgf[4*i + 3] = dotghelper(0, DOWN, config[i].tau, DOWN, config);
    }
    for(uint i = 0; i < config.size(); ++i)
    {
        dotgf[4*(i+config.size()) + 0] = dotghelper(config[i].tau, UP, 0, UP, config);
        dotgf[4*(i+config.size()) + 1] = dotghelper(config[i].tau, UP, 0, DOWN, config);
        dotgf[4*(i+config.size()) + 2] = dotghelper(config[i].tau, DOWN, 0, UP, config);
        dotgf[4*(i+config.size()) + 3] = dotghelper(config[i].tau, DOWN, 0, DOWN, config);
    }
    // Each lattice point k is independent; dotgf is read-only from here on.
#pragma omp parallel for
    for (unsigned int k = 0; k < func.size(); ++k)
    {
        uint n = k / Nx;
        uint r = k % Nx;
        // Spin-diagonal case: only the UP/UP and DOWN/DOWN mixed propagators
        // contribute (the spin-off-diagonal accumulators that used to live
        // here as commented-out code were removed for readability).
        GFRetVal mixedupup = accessmixed(r, n, UP, 0.0, UP, 0.0);
        GFRetVal mixeddowndown = accessmixed(r, n, DOWN, 0.0, DOWN, 0.0);
        GFRetVal mixedupupconj = conj(accessmixed(r, n, UP, 0.0, UP, 0.0));
        GFRetVal mixeddowndownconj = conj(accessmixed(r, n, DOWN, 0.0, DOWN, 0.0));
        for(uint q = 0; q < config.size(); ++q)
            for(uint s = 0; s < config.size(); ++s)
            {
                mixedupup -= (
                    config.matcont.mat(2*q, 2*s) * accessmixed(r, n, UP, config[q].tau, UP, 0.0) * dotgf[4*s+0]
                    +config.matcont.mat(2*q, 2*s+1)* accessmixed(r, n, UP, config[q].tau, UP, 0.0) * dotgf[4*s+1]
                    +config.matcont.mat(2*q+1, 2*s)* accessmixed(r, n, DOWN, config[q].tau, UP, 0.0)* dotgf[4*s+0]
                    +config.matcont.mat(2*q+1, 2*s+1) *accessmixed(r, n, DOWN, config[q].tau, UP, 0.0)* dotgf[4*s+1]
                );
                mixeddowndown -= (
                    config.matcont.mat(2*q, 2*s) * accessmixed(r, n, UP, config[q].tau, DOWN, 0.0) * dotgf[4*s+2]
                    +config.matcont.mat(2*q, 2*s+1)* accessmixed(r, n, UP, config[q].tau, DOWN, 0.0) * dotgf[4*s+3]
                    +config.matcont.mat(2*q+1, 2*s)* accessmixed(r, n, DOWN, config[q].tau, DOWN, 0.0)* dotgf[4*s+2]
                    +config.matcont.mat(2*q+1, 2*s+1) *accessmixed(r, n, DOWN, config[q].tau, DOWN, 0.0)* dotgf[4*s+3]
                );
                mixedupupconj -= (
                    config.matcont.mat(2*q, 2*s) * conj(accessmixed(r, n, UP, 0.0, UP, config[s].tau)) * dotgf[4*(q+config.size()) + 0]
                    +config.matcont.mat(2*q, 2*s+1)* conj(accessmixed(r, n, UP, 0.0, DOWN, config[s].tau)) * dotgf[4*(q+config.size()) + 0]
                    +config.matcont.mat(2*q+1, 2*s)* conj(accessmixed(r, n, UP, 0.0, UP, config[s].tau))* dotgf[4*(q+config.size()) + 2]
                    +config.matcont.mat(2*q+1, 2*s+1) *conj(accessmixed(r, n, UP, 0.0, DOWN, config[s].tau))* dotgf[4*(q+config.size()) + 2]
                );
                mixeddowndownconj -= (
                    config.matcont.mat(2*q, 2*s) * conj(accessmixed(r, n, DOWN, 0.0, UP, config[s].tau)) * dotgf[4*(q+config.size()) + 1]
                    +config.matcont.mat(2*q, 2*s+1)* conj(accessmixed(r, n, DOWN, 0.0, DOWN, config[s].tau)) * dotgf[4*(q+config.size()) + 1]
                    +config.matcont.mat(2*q+1, 2*s)* conj(accessmixed(r, n, DOWN, 0.0, UP, config[s].tau))* dotgf[4*(q+config.size()) + 3]
                    +config.matcont.mat(2*q+1, 2*s+1) *conj(accessmixed(r, n, DOWN, 0.0, DOWN, config[s].tau))* dotgf[4*(q+config.size()) + 3]
                );
            }
        //note that this formula is the relevant one if the dot greens function and the mixed <dc> greensfunctions are spin diagonal
        func[k] = ( - mixedupup*mixeddowndownconj - mixeddowndown*mixedupupconj)* config.phase/4.0;
    }
    this->add_bin(func);
    return;
}
/**
A class for measuring the hybridization:
<d^\dagger_{-\sigma} a_{0,-\sigma}>
*/
template <class Config, class GreensFunction, SPINS Spin>
class Hybridization : public Network_Cache<Config, typename Config::SignType>
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef GreensFunction GF;
typedef std::valarray<GFRetVal> Function;
typedef GFRetVal ObservableType;///<the Hybridization is only a complex number
/**
The Constructor for the Hybridization. It precomputes the mixed Green's
function table <d^+ gamma>(tau) on an equidistant tau grid of Nt points per
spin: first the bath eigendata is summed into momentum-space functions of
the Matsubara frequencies, then fourier1 transforms to real space, and a
final frequency sum yields the imaginary-time table stored in mixeddata.
*/
Hybridization(typename Config::Comm& net, const Parameters& params)/* throw()*/ : Network_Cache<Config, ObservableType>(net, "Hybridization"), beta(params.beta), Ny(params.Nb*2), Nx(params.Nx), Nt(400 + 1), Nw(4*Nt),
delta_beta(params.beta/static_cast<FPType>(Nt-1))
{
#ifdef _OPENMP
double start2 = omp_get_wtime();
#endif
std::cout<<"Creating Data for Hybridization"<<std::endl;
// Bath eigendata of the up/down Green's functions, provided by GF.
const GOmegaData<FPType> *const updata = GF::gomegaup->data;
const GOmegaData<FPType> *const downdata = GF::gomegadown->data;
Omegadata<FPType>* omega = new Omegadata<FPType>[Nw/2];
std::complex<FPType>* gup = new std::complex<FPType>[Nw];
std::complex<FPType>* gdown = new std::complex<FPType>[Nw];
// Conjugated dot Green's functions; positive and negative frequencies are
// interleaved as (+w_n, -w_n) pairs (see the locality note further below).
for (int t = 0; t < static_cast<int>(Nw/2); ++t)
{
gup[2*t] = conj((*GF::gomegaup)(t));
gup[2*t + 1] = conj((*GF::gomegaup)( -t - 1));
gdown[2*t] = conj((*GF::gomegadown)(t));
gdown[2*t + 1] = conj((*GF::gomegadown)( -t - 1));
// Fermionic Matsubara frequency w_n = pi (2n+1) / beta.
omega[t].omega = M_PI/params.beta*(2*t + 1);
omega[t].omegasq = omega[t].omega*omega[t].omega;
omega[t].invomega = 1.0/omega[t].omega;
}
FPType pref = GF::gethybridization() / std::sqrt(params.Nx);
const unsigned int Nxw = Nw*params.Nx;
FPType normierungmixed = 1.0/params.beta / std::sqrt(params.Nx);
// Phase step per tau grid point for the frequency -> time transform below.
const std::complex<FPType> expNt = std::exp(std::complex<FPType>(0.0, -M_PI/(Nt-1)));
// Nt points for spin UP followed by Nt points for spin DOWN.
mixeddata = new std::complex<FPType>[2*Nt];
std::complex<FPType>* funup = new std::complex<FPType>[Nxw];
std::complex<FPType>* fundown = new std::complex<FPType>[Nxw];
memset(funup, 0, Nxw*sizeof(std::complex<FPType>));
memset(fundown, 0, Nxw*sizeof(std::complex<FPType>));
// double start = omp_get_wtime();
// Accumulate the bath sum over bands m for every momentum k and frequency.
for (uint k = 0; k < params.Nx; ++k)
{
for (uint m = 0; m < Ny; ++m)
{
std::complex<FPType> facup = conj(updata [k*Ny + m].u) * updata[k*Ny + m].evec[0];
std::complex<FPType> facdown = conj(downdata[k*Ny + m].u) * downdata[k*Ny + m].evec[0];
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{//the layout of the frequencies is now (w_n, -w_n) , that is every negative frequency is stored next to its positive counterpart.
//Hopefully this gives a better data locality
funup[2*omega_idx*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, -omega[omega_idx].omega);
funup[(2*omega_idx + 1)*params.Nx + k] += facup/std::complex<FPType>(-updata[k*Ny + m].lambda, omega[omega_idx].omega);
fundown[2*omega_idx*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, -omega[omega_idx].omega);
fundown[(2*omega_idx + 1)*params.Nx + k] += facdown/std::complex<FPType>(-downdata[k*Ny + m].lambda, omega[omega_idx].omega);
}
}
}
// std::cout<<"time now: "<<omp_get_wtime() - start<<std::endl;
// Momentum -> real space transform, one frequency slice at a time.
for (uint w = 0; w < Nw; ++w)
{
fourier1(reinterpret_cast<FPType*>(funup + w*params.Nx), params.Nx, 1);
fourier1(reinterpret_cast<FPType*>(fundown + w*params.Nx), params.Nx, 1);
//funup as well as fundown now contain Q(r, i omega) for for n = 0
funup[w*params.Nx] *= pref; // == Q. pref == V/sqrt(L)
fundown[w*params.Nx] *= pref; // == Q. pref == V/sqrt(L)
}
std::complex<FPType> expt = 1;
for(uint t = 0; t < Nt; ++t)// here is the final Matsubara transform
{
std::complex<FPType> tempupmixed = 0;
std::complex<FPType> tempdownmixed = 0;
std::complex<FPType> expiom = expt;
std::complex<FPType> expfac = expiom*expiom;
for (int omega_idx = 0; omega_idx < static_cast<int>(Nw/2); ++omega_idx)
{
std::complex<FPType> gupp = gup[2*omega_idx];
std::complex<FPType> gupm = gup[2*omega_idx + 1];
std::complex<FPType> gdownp = gdown[2*omega_idx];
std::complex<FPType> gdownm = gdown[2*omega_idx + 1];
std::complex<FPType> cexpiom = conj(expiom);
tempupmixed += cexpiom * funup[(2*omega_idx)*params.Nx] * gupp + expiom * funup[(2*omega_idx + 1)*params.Nx] * gupm;
tempdownmixed += cexpiom * fundown[(2*omega_idx)*params.Nx] * gdownp + expiom * fundown[(2*omega_idx + 1)*params.Nx] * gdownm;
expiom *= expfac;
}
mixeddata[t] = tempupmixed*normierungmixed;
(mixeddata + Nt)[t] = tempdownmixed*normierungmixed;
expt *= expNt;
}
// std::cout<<"Initialization took "<<omp_get_wtime() - start<<" seconds"<<std::endl;
delete [] funup;
delete [] fundown;
#ifdef _OPENMP
std::cout<<"Initialization took "<<omp_get_wtime() - start2<<" seconds"<<std::endl;
#endif
// Debug dump of the mixed Green's function table.
std::ofstream file("goff.txt");
for(uint k = 0; k < Nt; ++k)
file<<mixeddata[k]<<std::endl;
delete [] gup;
delete [] gdown;
delete [] omega;
std::cout<<"Hybridization done"<<std::endl;
}
// Nothing to pre-register: evaluate() reads only precomputed tables and the
// static GreensFunction::eval, never the DoWick functor.
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
}
/**
this determines the Hybridization for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
const FPType beta;///< inverse temperature
std::complex<FPType>* mixeddata;///< here we store <d^+ gamma>: Nt tau points for UP, then Nt for DOWN. NOTE(review): never freed (no destructor) and raw-pointer copies would double-own -- confirm intended lifetime.
const uint Ny;///< number of bath bands (2 * params.Nb)
const uint Nx;///< number of lattice sites
const uint Nt;///< number of tau grid points per spin
const uint Nw;///< number of stored Matsubara frequencies (4 * Nt)
const FPType delta_beta;///< tau grid spacing beta / (Nt - 1)
// Linear interpolation lookup of <d^+ gamma>(tau1 - tau2). Negative time
// differences are mapped into [0, beta) with a sign flip.
std::complex<FPType> accessmixed(SPINS spin1, FPType tau1 , SPINS spin2, FPType tau2) const
{
FPType delta_tau = tau1 - tau2;
FPType sign = 1.0;
std::complex<FPType>* dataptr = mixeddata;
if(spin1 != spin2) return 0.0;// FIXME: the vanishing off-diagonal part is only the case for the spin symmetric case!
if (spin1 == DOWN) dataptr += Nt;
if(std::abs(delta_tau) < std::numeric_limits<FPType>::epsilon())
return std::complex<FPType>(dataptr[0]);
if(delta_tau < 0)
{
sign = -1.0;
delta_tau += beta;
}
FPType fptau_idx0;
FPType rem = std::modf(delta_tau/delta_beta, &fptau_idx0);//round to the smaller index and determine how far we're off.
std::size_t tau_idx0 = lround(fptau_idx0);
// Interpolate between the two neighboring tau grid points.
return std::complex<FPType>(lerp(rem, dataptr[tau_idx0], dataptr[tau_idx0 + 1]))*sign;
}
};
/**
Measures the hybridization for the given configuration and adds the
phase-weighted value to the measurement bin.
@param config the current Monte-Carlo configuration
@param dowick unused here; the correction terms are built from tables
*/
template <class Config, class GreensFunction, SPINS Spin>
void Hybridization<Config, GreensFunction, Spin>::evaluate(const Configuration& config, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    // Bare mixed Green's function at equal times, opposite spin.
    ObservableType result = accessmixed(!Spin, 0, !Spin, 0);
    // Bare dot Green's function from (0, !Spin) to (tau2, spin2).
    struct GDot
    {
        GFRetVal operator()(FPType tau2, SPINS spin2)
        {
            return GreensFunction::eval(typename GreensFunction::Vertex(0, !Spin), typename GreensFunction::Vertex(tau2, spin2));
        }
    } gdot;
    const uint nvert = config.size();
    for(uint q = 0; q < nvert; ++q)
    {
        // Hoist the q-dependent table lookups out of the inner loop; accessmixed
        // is a const lookup and GreensFunction::eval is assumed side-effect-free.
        const auto mixup = accessmixed(UP, config[q].tau, !Spin, 0.0);
        const auto mixdown = accessmixed(DOWN, config[q].tau, !Spin, 0.0);
        for(uint s = 0; s < nvert; ++s)
        {
            const auto gup = gdot(config[s].tau, UP);
            const auto gdown = gdot(config[s].tau, DOWN);
            // Even matrix indices address UP, odd ones DOWN.
            result -= (
                 config.matcont.mat(2*q, 2*s)     * mixup   * gup
                +config.matcont.mat(2*q, 2*s+1)   * mixup   * gdown
                +config.matcont.mat(2*q+1, 2*s)   * mixdown * gup
                +config.matcont.mat(2*q+1, 2*s+1) * mixdown * gdown
            );
        }
    }
    //add to measurement
    this->add_bin(result*config.phase);
    return;
}
/**
* Depending on s this measures the charge charge( s=1) correlation function or the spin-spin (s=-1) correlation function
* Note also that by its bare definition it is a real quantity.
* */
template <class Config, int s>
class SpinChargeParent : public Network_Cache<Config, std::valarray<std::valarray<typename Config::Configuration::FPType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<FPType> Function;
typedef std::valarray<Function> ObservableType;///< spin and charge correlations are potentially complex in kspace and are spatially resolved time-dependent observable
/**
The Constructor.
@param n the network/communication object used for binning
@param params run parameters; N, functionpoints and delta_s are referenced
*/
// NOTE(review): the observable name "CharcheChargeCorrelation" is misspelled
// ("Charche" -> "Charge"); left unchanged because downstream tooling may key
// on the exact string -- confirm before renaming.
SpinChargeParent(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config,ObservableType>(n, (s==1)?"CharcheChargeCorrelation":"SpinSpinCorrelation"),
len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the observable for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len and functionpoints are references into the Parameters
// object passed to the constructor -- params must outlive this observable.
const uint32_t& len;
const uint32_t& functionpoints;
const double delta_s;
};
/**
Registers, in the exact order used by evaluate(), every two-particle
Green's function evaluation the measurement will later request.
@param func the dry-run registration functor
*/
template <class Config, int cs>
void SpinChargeParent<Config, cs>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    const typename Configuration::value_type up0(0, 0, UP);
    const typename Configuration::value_type down0(0, 0, DOWN);
    for (unsigned int j = 0; j < functionpoints; ++j)
    {
        // The j == 0 point is nudged away from tau = 0 (evaluate() does the same).
        const FPType s = (j == 0) ? 0.000001 : j * delta_s;
        for(uint r = 0; r < len; ++r)
        {
            const typename Configuration::value_type upr(r, s, UP);
            const typename Configuration::value_type downr(r, s, DOWN);
            genericTwoParticleGreensfunction_dry<Configuration>(func, upr, upr, up0, up0);
            genericTwoParticleGreensfunction_dry<Configuration>(func, downr, downr, down0, down0);
            genericTwoParticleGreensfunction_dry<Configuration>(func, upr, upr, down0, down0);
            genericTwoParticleGreensfunction_dry<Configuration>(func, downr, downr, up0, up0);
        }
    }
    return;
}
// Maps an observable value to a real number: pass-through for real types.
template <typename T>
struct Help
{
    typedef T RetType;
    static inline RetType toreal(T value)
    {
        return value;
    }
};
// Specialization for complex values: keep only the real part.
template <typename FPType>
struct Help<std::complex<FPType> >
{
    typedef FPType RetType;
    static inline RetType toreal(std::complex<FPType> value)
    {
        return value.real();
    }
};
/**
Evaluates the charge-charge (cs == 1) or spin-spin (cs == -1) correlation
function, Fourier transformed in space, and adds it to the measurement bin.
@param configuration the current Monte-Carlo configuration
@param dowick functor delivering the Wick-contracted Green's functions
*/
template <class Config, int cs>
void SpinChargeParent<Config, cs>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
    ObservableType func(len);
    const FPType fac = static_cast<FPType>(TWOPI / len);
    // Prefactor switch: 1 for charge-charge, 1/4 for spin-spin.
    const FPType csfac = (cs == 1 ? 1.0 : 0.25);
    const typename Config::SignType phase_factor = configuration.phase * csfac;
    const typename Configuration::value_type up0(0, 0, UP);
    const typename Configuration::value_type down0(0, 0, DOWN);
    for (unsigned int k = 0; k < len; ++k)
    {
        func[k].resize(functionpoints);
        for (unsigned int j = 0; j < functionpoints; ++j)
        {
            // Nudge tau = 0 slightly to obtain the correct equal-time value.
            const FPType s = (j == 0) ? 0.000001 : j * delta_s;
            GFRetVal sum = 0;
            for(uint r = 0; r < len; ++r)
            {
                const FPType pref = std::cos(fac * (k*r));
                const typename Configuration::value_type upr(r, s, UP);
                const typename Configuration::value_type downr(r, s, DOWN);
                auto sum2 = genericTwoParticleGreensfunction<Configuration>(dowick, upr, upr, up0, up0);//to deduce type
                sum2 += genericTwoParticleGreensfunction<Configuration>(dowick, downr, downr, down0, down0)
                        + FPType(cs)*(genericTwoParticleGreensfunction<Configuration>(dowick, upr, upr, down0, down0)
                                      + genericTwoParticleGreensfunction<Configuration>(dowick, downr, downr, up0, up0));
                sum += pref * sum2;
            }
            // The 1/N normalization is already contained in the definition of G_0.
            func[k][j] = Help<GFRetVal>::toreal(phase_factor * sum);
        }
    }
    this->add_bin(func);
    return;
}
/**
* This measures <S^+(k, tau) S^-(0,0)> which is due to translation symmetry the same as <S^+(k,tau), S^-(k,0)>
* Note also that by its bare definition it is a real quantity.
* */
template <class Config>
class SplusSminus : public Network_Cache<Config, std::valarray<std::valarray<typename Config::Configuration::FPType> > >
{
public:
typedef typename Config::Configuration Configuration;
typedef typename Configuration::FPType FPType;
typedef typename Config::SignType GFRetVal;
typedef std::valarray<FPType> Function;///< SplusSminus is a real quantity
typedef std::valarray<Function> ObservableType;///< spin and charge correlations are potentially complex in kspace and are spatially resolved time-dependent observable
/**
The Constructor.
@param n the network/communication object used for binning
@param params run parameters; N, functionpoints and delta_s are referenced
*/
SplusSminus(typename Config::Comm& n, const Parameters& params) throw() : Network_Cache<Config,ObservableType>(n, "SplusSminus"),
len(params.N), functionpoints(params.functionpoints), delta_s(params.delta_s)
{
}
void dryrun(DryRun<typename Configuration::value_type, GFRetVal>&);
/**
this determines the observable for a given order
@param configuration the configuration
*/
inline void evaluate(const Configuration&, const DoWick<typename Configuration::value_type, GFRetVal>&);
private:
// NOTE(review): len and functionpoints are references into the Parameters
// object passed to the constructor -- params must outlive this observable.
const uint32_t& len;
const uint32_t& functionpoints;
const double delta_s;
};
/**
Announces, in evaluate()'s exact order, every two-particle Green's function
that the S^+ S^- measurement will request.
@param func the dry-run registration functor
*/
template <class Config>
void SplusSminus<Config>::dryrun(DryRun<typename Configuration::value_type, GFRetVal>& func)
{
    const typename Configuration::value_type up0(0, 0, UP);
    const typename Configuration::value_type down0(0, 0, DOWN);
    for (unsigned int j = 0; j < functionpoints; ++j)
    {
        // The j == 0 point is nudged away from tau = 0 (evaluate() does the same).
        const FPType s = (j == 0) ? 0.000001 : j * delta_s;
        for(uint r = 0; r < len; ++r)
        {
            const typename Configuration::value_type upr(r, s, UP);
            const typename Configuration::value_type downr(r, s, DOWN);
            genericTwoParticleGreensfunction_dry<Configuration>(func, upr, downr, down0, up0);
        }
    }
    return;
}
/**
Evaluates <S^+(k, tau) S^-(0, 0)>, Fourier transformed in space, and adds
the phase-weighted result to the measurement bin. The Green's function
requests must match the order registered by dryrun().
@param configuration the current Monte-Carlo configuration
@param dowick functor delivering the Wick-contracted Green's functions
*/
template <class Config>
void SplusSminus<Config>::evaluate(const Configuration& configuration, const DoWick<typename Configuration::value_type, GFRetVal>& dowick)
{
ObservableType func(len);
FPType invlen = 1.0/len;
const FPType fac = TWOPI * invlen;
const typename Configuration::value_type v2(0, 0, UP);
const typename Configuration::value_type v4(0, 0, DOWN);
// auto constres = dowick(v4, v2);
for (unsigned int k = 0; k < len; ++k)
{
func[k].resize(functionpoints);
for (unsigned int j = 0; j < functionpoints; ++j)
{
FPType s = j * delta_s;
if(j == 0)
s = 0.000001;//seems to be necessary here to pick the correct equal-time value...
GFRetVal sum = 0;
// Spatial Fourier transform: sum over distance r with weight cos(2 pi k r / len).
for(uint r = 0; r < len; ++r)
{
FPType pref = std::cos(fac * (k*r));
const typename Configuration::value_type v1(r, s, UP);
const typename Configuration::value_type v3(r, s, DOWN);
auto sum2 =
// dowick(v1, v3) * constres - dowick(v1, v2) * dowick(v4, v3);
//dowick.template onSector<UP>(r, s, 0,0) * dowick.template onSector<DOWN>(0, 0, r, s);
genericTwoParticleGreensfunction<Configuration>(dowick, v1, v3, v4, v2);
sum += pref * sum2;
}
func[k][j] = Help<GFRetVal>::toreal(configuration.phase * sum);//normalization not necessary since the 1/N factor is already in the definition of G_0
}
}
this->add_bin(func);
return;
}
#endif
|
simple_env.c | // RUN: %libomp-compile
// RUN: env OMP_DISPLAY_AFFINITY=true OMP_AFFINITY_FORMAT='TESTER-ENV: tl:%L tn:%n nt:%N' OMP_NUM_THREADS=8 %libomp-run | %python %S/check.py -c 'CHECK-8' %s
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// libomp regression test driver: the harness runs this binary with
// OMP_DISPLAY_AFFINITY=true and a custom OMP_AFFINITY_FORMAT, then check.py
// matches the output against the CHECK-8 pattern below.
int main(int argc, char** argv) {
// NOTE(review): two identical empty parallel regions -- presumably to verify
// the affinity banner is emitted only for the first region, not repeated for
// the second; confirm against check.py.
#pragma omp parallel
{ }
#pragma omp parallel
{ }
return 0;
}
// CHECK-8: num_threads=8 TESTER-ENV: tl:1 tn:[0-7] nt:8$
|
prime.c | #include<stdio.h>
#include<stdlib.h>
#include<omp.h>
int main(){
int index;
int i;
int count;
int first;
int N;
int prime;
char *marked;
double start, end;
omp_set_num_threads(8);
int n = 1000000;
FILE *fptr;
N = n+1;
marked = (char *) malloc (N);
if (marked==NULL){
printf("Cannot allocate enough memory\n");
exit(1);
}
for(i=0;i<N;i++){
marked[i]=1;
}
marked[0]=0;
marked[1]=0;
index=2;
prime=2;
start = omp_get_wtime();
# pragma omp parallel shared(n,marked),private(first)
while(prime*prime<=n){
first = 2*prime;
#pragma omp parallel for
for (i =first;i<N;i+=prime) marked[i]=0;
while(!marked[++index]);
prime=index;
}
# pragma omp barrier
count=0;
fptr = fopen("kPairs_1000000.txt", "w");
#pragma omp parallel for reduction(+:count)
for(i=3;i<N;i=i+2){
if(marked[i]==1 && marked[i+2]==1){
fprintf(fptr,"(%d,%d) \n",i,i+2);
++count;
}
}
end = omp_get_wtime() - start;
printf("n : %d , k : %d\n",n,count);
printf("%.6g seconds\n",end);
return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for struct timeval values.
 * Normalizes by borrowing/carrying seconds through *y (which is therefore
 * modified), guarantees result->tv_usec ends up non-negative, and returns
 * 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec holds. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry any surplus beyond one second back into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is certainly non-negative after normalization. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
// Benchmark driver: runs the Pluto-tiled order-1 3D 7-point stencil TESTS
// times on a double-buffered (Nx+2) x (Ny+2) x (Nz+2) grid and reports the
// minimum wall time. Grid sizes come from argv[1..3], time steps from argv[4].
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
// NOTE(review): Nx/Ny/Nz (argc <= 3) and Nt (argc <= 4) stay uninitialized
// when too few arguments are given; the code below then reads indeterminate
// values. Presumably the benchmark harness always passes all four -- confirm.
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// Two time buffers A[0]/A[1], each a Nz x Ny x Nx grid allocated row by row.
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// Stencil coefficients: center weight alpha, neighbor weight beta.
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
// Repeat the whole sweep TESTS times and keep the fastest run.
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
// Generated time-skewed tiled loop nest; t2 is the parallel (wavefront)
// dimension. Do not edit by hand -- regenerate via Pluto/CLooG instead.
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1020,1024)),ceild(32*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(8*t1+Nx+13,1024)),floord(16*t2+Nx+12,1024)),floord(32*t3+Nx+28,1024)),floord(16*t1-16*t2+Nz+Nx+11,1024));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),1024*t4+1022),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(1024*t4,t5+1);
ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
// 7-point stencil update; the two time buffers alternate via (t5 + 1) % 2.
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
cgelqf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgelqf.c, normal z -> c, Fri Sep 28 17:38:01 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes tile LQ factorization of a complex m-by-n matrix A.
* The factorization has the form
* \f[ A = L \times Q \f],
* where L is a lower trapezoidal with positive diagonal and Q is a matrix with
* orthonormal rows.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-min(m,n) lower trapezoidal matrix L (L is lower
* triangular if M <= N); the elements above the diagonal represent
* the unitary matrix Q as a product of elementary reflectors, stored
* by tiles.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data, required by plasma_cgelqs
* to solve the system of equations.
* Matrix of T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_cgelqf
* @sa plasma_cgelqf
* @sa plasma_dgelqf
* @sa plasma_sgelqf
* @sa plasma_cgelqs
*
******************************************************************************/
int plasma_cgelqf(int m, int n,
                  plasma_complex32_t *pA, int lda,
                  plasma_desc_t *T)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaComplexFloat, m, n);
    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    plasma_enum_t householder_mode = plasma->householder_mode;
    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // fix: release the tile matrix created above instead of leaking it
        plasma_desc_destroy(&A);
        return retval;
    }
    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // fix: release T and A on this error path; the caller only owns T
        // after a successful return
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        // Call the tile async function.
        plasma_omp_cgelqf(A, *T, work, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization
    plasma_workspace_destroy(&work);
    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes the tile LQ factorization of a matrix.
* Non-blocking tile version of plasma_cgelqf().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[out] T
* Descriptor of matrix T.
* On exit, auxiliary factorization data, required by plasma_cgelqs to
* solve the system of equations.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For LQ factorization, contains preallocated space for tau and work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_cgelqf
* @sa plasma_omp_cgelqf
* @sa plasma_omp_dgelqf
* @sa plasma_omp_sgelqf
* @sa plasma_omp_cgelqs
*
******************************************************************************/
void plasma_omp_cgelqf(plasma_desc_t A, plasma_desc_t T,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request first: every error report below goes
    // through plasma_request_fail(sequence, request, ...), so a NULL
    // pointer must be caught before it is dereferenced (the original code
    // validated the descriptors -- and reported failures through a
    // possibly-NULL sequence -- before performing these checks).
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (imin(A.m, A.n) == 0)
        return;
    // Call the parallel function.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pcgelqf_tree(A, T, work, sequence, request);
    }
    else {
        plasma_pcgelqf(A, T, work, sequence, request);
    }
}
|
mish_kernel_ref_fp32.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author:
*/
#include "mish_kernel_ref.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
int ref_mish_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* Mish activation, fp32 reference: out[i] = x * tanh(softplus(x)).
     * Variable names imply an NCHW layout: dims[1] = channels,
     * dims[2] = h, dims[3] = w.
     * NOTE(review): only dims[1..3] are walked, so this assumes the batch
     * dimension dims[0] is 1 -- confirm against the callers. */
    int w = input_tensor->dims[3];
    /* fix: read h from the input tensor as well; the original mixed input
     * (w, channels) and output (h) dims, which silently breaks if the two
     * shapes ever disagree */
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int size = h * w;
    float* input_data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_data + size * q;
        float* dst = out_data + size * q;
        for (int i = 0; i < size; i++)
        {
            /* fix: use the float math functions (expf/log1pf/tanhf) in this
             * fp32 kernel instead of the double log/exp; log1pf also keeps
             * softplus accurate for small exp(x).  For large x, expf
             * overflows to +inf and tanhf(+inf) == 1, so the asymptote
             * mish(x) -> x is preserved. */
            dst[i] = src[i] * tanhf(log1pf(expf(src[i])));
        }
    }
    return 0;
}
|
nested_task_creation.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
/*
* This test creates tasks that themselves create a new task.
* The runtime has to take care that they are correctly freed.
*/
int main()
{
    // Orphaned nested tasks: created outside any parallel region, so only
    // the initial thread is available to execute them.  The point is the
    // bookkeeping (see file header), not parallelism.
    #pragma omp task
    {
        #pragma omp task
        {
            my_sleep( 0.1 );
        }
    }
    // Same task-inside-task nesting, now inside a two-thread team.  A single
    // thread creates the outer task; whichever thread executes it creates
    // the inner one.  The runtime must free both task descriptors correctly
    // even though the child may outlive its parent's task region.
    #pragma omp parallel num_threads(2)
    {
        #pragma omp single
        #pragma omp task
        {
            #pragma omp task
            {
                my_sleep( 0.1 );
            }
        }
    }
    // Reaching this point without crashing/leaking is the pass criterion.
    printf("pass\n");
    return 0;
}
|
sort_test.c | // gcc -O4 sort_test.c -fopenmp -lgomp
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define THREADS 4
/* qsort comparator for ints: negative/zero/positive for a<b, a==b, a>b.
 * fix: the original returned (*a - *b), which overflows (undefined
 * behavior, wrong sign) when the operands are far apart, e.g.
 * INT_MIN vs INT_MAX.  The two-comparison idiom cannot overflow. */
int cmpfunc (const void * a, const void * b) {
    int x = *(const int *) a;
    int y = *(const int *) b;
    return (x > y) - (x < y);
}
/* Print num_elem ints, each followed by a single space, then a newline. */
void print_arr(int * arr, int num_elem) {
    int idx = 0;
    while (idx < num_elem) {
        printf("%d ", arr[idx]);
        ++idx;
    }
    printf("\n");
}
/* Copy num_elem ints from src into dest (ranges must not overlap usefully;
 * same element-by-element semantics as before). */
void copy_arr(int *src, int *dest, int num_elem) {
    int *end = src + num_elem;
    while (src < end) {
        *dest++ = *src++;
    }
}
/* Debug dump of the partition-info table between "info begin"/"info end"
 * markers.  The table size is hard-coded to 4 partitions x 4 fields, exactly
 * as before. */
void print_info(int ** info) {
    printf("info begin\n");
    int part = 0;
    int num_partitions = 4;
    while (part < num_partitions) {
        print_arr(info[part], 4);
        ++part;
    }
    printf("info end\n");
}
/* sorts an array of ints arr in parallel*/
/* Bucket sort: partition *arr into num_partitions value ranges described by
 * info (info[i][0] = low bound, info[i][1] = high bound, info[i][2] = count,
 * info[i][3] = running offset), then qsort each bucket in parallel.
 * On success *arr is replaced by a newly allocated sorted array (the old
 * array is freed) and 1 is returned; 0 signals an allocation failure. */
int parallel_sort(int ** arr, int num_elem, int ** info, int num_partitions) {
    int * vals = *arr;
    /* first linear scan: count the elements falling into each partition.
     * fix: the original tested vals[j] < info[i][1] for every partition,
     * which silently drops elements equal to the top bound of the last
     * partition (main() sets it to RAND_MAX, a value rand() can return),
     * leaving stale garbage in the output array.  The last partition is now
     * inclusive at its upper bound. */
    #pragma omp parallel for
    for ( int i = 0; i < num_partitions; i++ ) {
        int last = (i == num_partitions - 1);
        for ( int j = 0; j < num_elem; j++ ) {
            int v = vals[j];
            if ( v >= info[i][0] && (v < info[i][1] || (last && v == info[i][1])) ) {
                info[i][2] += 1;
            }
        }
    }
    /* exclusive prefix sum of the counts gives each bucket's start offset.
     * fix: this loop carries a dependency (iteration i reads info[i-1][3]
     * written by iteration i-1), so the original "omp parallel for" here was
     * a data race producing nondeterministic offsets.  It is
     * O(num_partitions) work and runs serially. */
    info[0][3] = 0;
    for ( int i = 1; i < num_partitions; i++ ) {
        info[i][3] = info[i-1][2] + info[i-1][3];
    }
    /* second linear scan: scatter elements into their buckets */
    int * bucketed_vals = (int *) malloc(num_elem * sizeof(int));
    if ( bucketed_vals == NULL ) {
        return 0; /* fix: report failure instead of writing through NULL */
    }
    #pragma omp parallel for schedule(static) num_threads(THREADS)
    for ( int i = 0; i < num_partitions; i++ ) {
        int last = (i == num_partitions - 1);
        for ( int j = 0; j < num_elem; j++ ) {
            int v = vals[j];
            if ( v >= info[i][0] && (v < info[i][1] || (last && v == info[i][1])) ) {
                bucketed_vals[info[i][3]] = v;
                info[i][3] += 1;
            }
        }
    }
    /* sort each bucket independently; buckets are disjoint slices, so no
     * synchronization is needed */
    clock_t start = clock();
    #pragma omp parallel for schedule(static) num_threads(THREADS)
    for ( int i = 0; i < num_partitions; i++ ) {
        int loc = info[i][3] - info[i][2]; /* bucket start = end - count */
        qsort(&bucketed_vals[loc], info[i][2], sizeof(int), cmpfunc);
    }
    clock_t stop = clock();
    /* NOTE: clock() measures CPU time summed over all threads, not wall
     * time.  fix: clock_t need not be int; cast so the %d vararg matches. */
    printf("core time = %d \n", (int) (stop - start));
    free(*arr);
    *arr = bucketed_vals; /* now bucketed_vals is fully sorted */
    return 1; /* indicates success */
}
/* Compare two int arrays element-wise: returns 0 when the first num_elem
 * elements all match, 1 as soon as any pair differs. */
int arr_equal(int * a, int * b, int num_elem) {
    int i = 0;
    while (i < num_elem) {
        if (a[i] != b[i])
            return 1;
        ++i;
    }
    return 0;
}
/* Driver: fills ~100M ints with rand(), sorts one copy with parallel_sort
 * and the other with plain qsort, compares results and reports timings. */
int main() {
    printf("begin\n");
    int num_elem = 100000000;
    int * values = (int *) malloc(num_elem * sizeof(int));
    int * values2 = (int *) malloc(num_elem * sizeof(int));
    /* fix: fail fast instead of dereferencing NULL when the ~800 MB of
     * allocations are refused */
    if (values == NULL || values2 == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(values);
        free(values2);
        return 1;
    }
    for ( int i = 0; i < num_elem; i++ ) {
        values[i] = rand();
    }
    copy_arr(values, values2, num_elem);
    int num_partitions = 4; // must be at least 2
    /* info[i] = {low bound, high bound, count, offset} for partition i */
    int ** info = (int **) malloc(num_partitions * sizeof(int *));
    if (info == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(values);
        free(values2);
        return 1;
    }
    /* fix: iterate to num_partitions, not a hard-coded 4 that would go
     * stale (and corrupt memory) if num_partitions were ever changed */
    for ( int i = 0; i < num_partitions; i++ ) {
        info[i] = (int *) malloc(4 * sizeof (int));
    }
    for ( int i = 0; i < num_partitions; i++ ) {
        int partition_size = RAND_MAX / num_partitions;
        info[i][0] = i * partition_size;
        info[i][1] = (i+1) * partition_size;
        info[i][2] = 0;
        info[i][3] = 0;
    }
    // stretch the last bound to cover the remainder lost to integer division
    info[num_partitions - 1][1] = RAND_MAX;
    /* parallel sort */
    printf("parallel sort\n");
    clock_t start = clock();
    parallel_sort(&values, num_elem, info, num_partitions);
    clock_t diff = clock() - start;
    /* fix: widen before the multiply -- diff * 1000 can overflow clock_t.
     * Note clock() is CPU time, not wall time, so this understates speedup. */
    int psort_msec = (int) ((long long) diff * 1000 / CLOCKS_PER_SEC);
    /* fix: %p requires a void* argument */
    printf("after sorting, values is located at %p\n", (void *) values);
    /* standard lib sort */
    printf("standard sort\n");
    start = clock();
    qsort(values2, num_elem, sizeof(int), cmpfunc);
    diff = clock() - start;
    int qsort_msec = (int) ((long long) diff * 1000 / CLOCKS_PER_SEC);
    printf("psort took %d and qsort took %d\n", psort_msec, qsort_msec);
    if (arr_equal(values, values2, num_elem) == 0) {
        printf("SUCCESS\n");
    } else {
        printf("FAILURE, arrays not equal\n");
    }
    printf("end\n");
    free(values);
    free(values2);
    for ( int i = 0; i < num_partitions; i++ ) {
        free(info[i]);
    }
    free(info);
    return 0;
}
|
tasks.h | /*~-------------------------------------------------------------------------~~*
* Copyright (c) 2016 Los Alamos National Laboratory, LLC
* All rights reserved
*~-------------------------------------------------------------------------~~*/
////////////////////////////////////////////////////////////////////////////////
/// \file
/// \brief Simple tasks related to solving full hydro solutions.
////////////////////////////////////////////////////////////////////////////////
#pragma once
// hydro includes
#include "types.h"
// flecsi includes
#include <flecsi-sp/io/io_exodus.h>
#include <flecsi/execution/context.h>
#include <flecsi/execution/execution.h>
#include <ristra/utils/string_utils.h>
// system includes
#include <iomanip>
namespace apps {
namespace hydro {
////////////////////////////////////////////////////////////////////////////////
//! \brief Update mesh geometry
//!
//! \param [in] mesh the mesh object
////////////////////////////////////////////////////////////////////////////////
void update_geometry(
  client_handle_r<mesh_t> mesh
) {
  // Delegates entirely to the mesh implementation; presumably this refreshes
  // the derived quantities the other tasks read (c->volume(), f->area(),
  // f->normal(), centroids) -- confirm against mesh_t.
  mesh.update_geometry();
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task for setting initial conditions
//!
//! \param [in,out] mesh       the mesh object
//! \param [in]     eos        the equation of state used to complete the state
//! \param [in]     soln_time  solution time at which the conditions are evaluated
//! \param [out]    d,v,e,p,T,a  handles to the cell-centered solution fields
////////////////////////////////////////////////////////////////////////////////
void initial_conditions(
  client_handle_r<mesh_t> mesh,
  eos_t eos,
  real_t soln_time,
  dense_handle_w<real_t> d,    // density (per output()/dump() naming)
  dense_handle_w<vector_t> v,  // velocity
  dense_handle_w<real_t> e,    // internal energy
  dense_handle_w<real_t> p,    // pressure
  dense_handle_w<real_t> T,    // presumably temperature -- confirm
  dense_handle_w<real_t> a     // presumably sound speed -- confirm
) {
  for ( auto c : mesh.cells( flecsi::owned ) ) {
    auto lid = c.id();
    // evaluate the compiled-in initial-condition function for this cell;
    // it supplies density, velocity, and pressure directly
    std::tie( d(c), v(c), p(c) ) = inputs_t::initial_conditions(
      mesh, lid, soln_time );
    // complete the packed state from the pressure via the equation of state
    eqns_t::update_state_from_pressure(
      pack( c, d, v, p, e, T, a ),
      eos
    );
  }
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task for setting initial conditions from an input file
//!
//! \param [in,out] mesh       the mesh object
//! \param [in]     eos        the equation of state used to complete the state
//! \param [in]     soln_time  solution time at which the conditions are evaluated
//! \param [in]     filename   input file the initial conditions are read from
//! \param [out]    d,v,e,p,T,a  handles to the cell-centered solution fields
////////////////////////////////////////////////////////////////////////////////
void initial_conditions_from_file(
  client_handle_r<mesh_t> mesh,
  eos_t eos,
  real_t soln_time,
  char_array_t filename,
  dense_handle_w<real_t> d,    // density
  dense_handle_w<vector_t> v,  // velocity
  dense_handle_w<real_t> e,    // internal energy
  dense_handle_w<real_t> p,    // pressure
  dense_handle_w<real_t> T,    // presumably temperature -- confirm
  dense_handle_w<real_t> a     // presumably sound speed -- confirm
) {
  // load the initial-condition functor from the named input file; unlike
  // initial_conditions() above, it is evaluated at the cell centroid
  auto ics = inputs_t::get_initial_conditions(filename.str());
  // This doesn't work with lua input (the functor is not thread-safe there)
  //#pragma omp parallel for
  for ( auto c : mesh.cells( flecsi::owned ) ) {
    std::tie( d(c), v(c), p(c) ) = ics( c->centroid(), soln_time );
    // complete the packed state from the pressure via the equation of state
    eqns_t::update_state_from_pressure(
      pack( c, d, v, p, e, T, a ),
      eos
    );
  }
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task to compute the time step size.
//!
//! \tparam E The equation of state object to use.
//! \param [in,out] mesh the mesh object
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
real_t evaluate_time_step(
  client_handle_r<mesh_t> mesh,
  dense_handle_r<real_t> d,
  dense_handle_r<vector_t> v,
  dense_handle_r<real_t> e,
  dense_handle_r<real_t> p,
  dense_handle_r<real_t> T,
  dense_handle_r<real_t> a,
  real_t CFL,     // Courant factor applied to the stability-limited step
  real_t max_dt   // hard upper bound on the returned step
) {
  // Loop over each cell, computing the minimum time step,
  // which is also the maximum 1/dt.  Only locally owned cells are visited;
  // presumably a global reduction happens at the launch site -- confirm.
  real_t dt_inv(0);
  for ( auto c : mesh.cells( flecsi::owned ) ) {
    // get the solution state
    auto u = pack( c, d, v, p, e, T, a );
    // loop over each face
    for ( auto f : mesh.faces(c) ) {
      // estimate the length scale normal to the face as volume / face area
      auto delta_x = c->volume() / f->area();
      // compute the inverse of the time scale
      auto dti = eqns_t::fastest_wavespeed( u, f->normal() ) / delta_x;
      // track the largest 1/dt (i.e. the most restrictive face)
      dt_inv = std::max( dti, dt_inv );
    } // edge
  } // cell
  // dt_inv == 0 would mean an infinite time step -- treat as an error
  if ( dt_inv <= 0 )
    THROW_RUNTIME_ERROR( "infinite delta t" );
  real_t time_step = 1 / dt_inv;
  time_step *= CFL;
  // access the computed time step and make sure its not too large
  time_step = std::min( time_step, max_dt );
  return time_step;
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task to evaluate fluxes at each face.
//!
//! \param [in,out] mesh the mesh object
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
void evaluate_fluxes(
  client_handle_r<mesh_t> mesh,
  dense_handle_r<real_t> d,
  dense_handle_r<vector_t> v,
  dense_handle_r<real_t> e,
  dense_handle_r<real_t> p,
  dense_handle_r<real_t> T,
  dense_handle_r<real_t> a,
  dense_handle_w<flux_data_t> flux   // per-face flux, pre-scaled by face area
) {
  const auto & face_list = mesh.faces( flecsi::owned );
  auto num_faces = face_list.size();
  // Safe to parallelize: each iteration writes only its own face's flux(f).
  #pragma omp parallel for
  for ( counter_t fit = 0; fit < num_faces; ++fit )
  {
    const auto & f = face_list[fit];
    // get the cell neighbors
    const auto & cells = mesh.cells(f);
    auto num_cells = cells.size();
    // get the left state (cells[0] is taken as the "left" side; apply_update
    // relies on the same neigh[0] convention for the flux sign)
    auto w_left = pack( cells[0], d, v, p, e, T, a );
    // compute the face flux
    //
    // interior cell: two neighbors, two-state Riemann-style flux function
    if ( num_cells == 2 ) {
      auto w_right = pack( cells[1], d, v, p, e, T, a );
      flux(f) = flux_function<eqns_t>( w_left, w_right, f->normal() );
    }
    // boundary cell: single neighbor, dedicated boundary flux
    else {
      flux(f) = boundary_flux<eqns_t>( w_left, f->normal() );
    }
    // scale the flux by the face area
    flux(f) *= f->area();
  } // for
  //----------------------------------------------------------------------------
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task to update the solution in each cell.
//!
//! \param [in,out] mesh the mesh object
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
void apply_update(
  client_handle_r<mesh_t> mesh,
  eos_t eos,
  real_t delta_t,                    // time step size
  dense_handle_r<flux_data_t> flux,  // area-scaled face fluxes (evaluate_fluxes)
  dense_handle_rw<real_t> d,
  dense_handle_rw<vector_t> v,
  dense_handle_rw<real_t> e,
  dense_handle_rw<real_t> p,
  dense_handle_rw<real_t> T,
  dense_handle_rw<real_t> a
) {
  //----------------------------------------------------------------------------
  // Loop over each cell, scattering the fluxes to the cell
  //auto delta_t = static_cast<real_t>( time_step );
  const auto & cell_list = mesh.cells( flecsi::owned );
  auto num_cells = cell_list.size();
  // Safe to parallelize: each iteration updates only its own cell's state.
  #pragma omp parallel for
  for ( counter_t cit = 0; cit < num_cells; ++cit )
  {
    const auto & c = cell_list[cit];
    // initialize the update
    flux_data_t delta_u( 0 );
    // loop over each connected edge
    for ( auto f : mesh.faces(c) ) {
      // get the cell neighbors
      auto neigh = mesh.cells(f);
      auto num_neigh = neigh.size();
      // add the contribution to this cell only; the sign depends on which
      // side of the face this cell sits (fluxes were computed with neigh[0]
      // as the "left" state in evaluate_fluxes)
      if ( neigh[0] == c )
        delta_u -= flux(f);
      else
        delta_u += flux(f);
    } // edge
    // now compute the final update (finite-volume scaling by dt / volume)
    delta_u *= delta_t/c->volume();
    // apply the update to the conserved quantities
    auto u = pack(c, d, v, p, e, T, a);
    eqns_t::update_state_from_flux( u, delta_u );
    // update the rest of the quantities from the energy via the EOS
    eqns_t::update_state_from_energy( u, eos );
    // check the solution quantities for physical validity
    if ( eqns_t::internal_energy(u) < 0 || eqns_t::density(u) < 0 )
      THROW_RUNTIME_ERROR( "Negative density or internal energy encountered!" );
  } // for
  //----------------------------------------------------------------------------
}
////////////////////////////////////////////////////////////////////////////////
/// \brief output the solution
////////////////////////////////////////////////////////////////////////////////
void output(
  client_handle_r<mesh_t> mesh,
  char_array_t prefix,    // output file name prefix
  char_array_t postfix,   // output file extension
  size_t iteration,       // iteration counter embedded in the file name
  real_t time,            // solution time written to the file
  dense_handle_r<real_t> d,
  dense_handle_r<vector_t> v,   // unused here; presumably kept so all
  dense_handle_r<real_t> e,     // output-style tasks share one signature --
  dense_handle_r<real_t> p,     // confirm
  dense_handle_r<real_t> T,
  dense_handle_r<real_t> a
) {
  clog(info) << "OUTPUT MESH TASK" << std::endl;
  // get the context
  auto & context = flecsi::execution::context_t::instance();
  auto rank = context.color();
  // figure out this ranks file name: <prefix>_rank<r>.<iteration>.<postfix>
  auto output_filename =
    prefix.str() + "_rank" + apps::common::zero_padded(rank) +
    "." + apps::common::zero_padded(iteration) + "." + postfix.str();
  // now output the mesh; only density and pressure are written
  using field_type = decltype(d);
  std::vector<field_type*> var_ptrs{&d, &p};
  std::vector<std::string> var_names{"density", "pressure"};
  flecsi_sp::io::io_exodus<mesh_t>::write(
    output_filename, mesh, time, var_ptrs, var_names
  );
}
////////////////////////////////////////////////////////////////////////////////
/// \brief output the solution
////////////////////////////////////////////////////////////////////////////////
void print(
  client_handle_r<mesh_t> mesh,
  char_array_t filename   // base name; "_rank<r>" is inserted before the extension
) {
  // get the context
  auto & context = flecsi::execution::context_t::instance();
  auto rank = context.color();
  clog(info) << "PRINT MESH ON RANK " << rank << std::endl;
  // figure out this ranks file name
  auto name_and_ext = ristra::utils::split_extension( filename.str() );
  auto output_filename =
    name_and_ext.first + "_rank" + apps::common::zero_padded(rank) +
    "." + name_and_ext.second;
  // dump the mesh connectivity to the per-rank file
  std::cout << "Dumping connectivity to: " << output_filename << std::endl;
  std::ofstream file( output_filename );
  mesh.dump( file );
  // close file (would also happen at scope exit)
  file.close();
}
////////////////////////////////////////////////////////////////////////////////
/// \brief Dump solution to file for regression testing
////////////////////////////////////////////////////////////////////////////////
void dump(
  client_handle_r<mesh_t> mesh,
  size_t iteration,
  real_t time,
  dense_handle_r<real_t> d,    // density
  dense_handle_r<vector_t> v,  // velocity
  dense_handle_r<real_t> e,    // internal energy
  dense_handle_r<real_t> p,    // pressure
  char_array_t filename) {
  // get the context
  auto & context = flecsi::execution::context_t::instance();
  auto rank = context.color();
  constexpr auto num_dims = mesh_t::num_dimensions;
  // local-to-global id maps so the text dump is comparable across runs
  const auto & vert_lid_to_gid = context.index_map( mesh_t::index_spaces_t::vertices );
  const auto & cell_lid_to_gid = context.index_map( mesh_t::index_spaces_t::cells );
  clog(info) << "DUMP SOLUTION ON RANK " << rank << std::endl;
  // figure out this ranks file name
  auto name_and_ext = ristra::utils::split_extension( filename.str() );
  auto output_filename =
    name_and_ext.first + "_rank" + apps::common::zero_padded(rank) +
    + "." + name_and_ext.second;
  // dump to file
  if (rank==0)
    std::cout << "Dumping solution to: " << output_filename << std::endl;
  std::ofstream file( output_filename );
  // Dump cell centered quantities in fixed scientific notation so regression
  // comparisons are stable
  file.precision(14);
  file.setf( std::ios::scientific );
  file << "# Solution time: " << time << std::endl;
  file << "# Number iterations: " << iteration << std::endl;
  file << "# BEGIN CELLS" << std::endl;
  file << "# Total number: " << mesh.num_cells() << std::endl;
  file << "# local_id global_id ";
  for ( int dim=0; dim<num_dims; ++dim ) file << "centroid(" << dim << ") ";
  file << "density internal_energy pressure ";
  for ( int dim=0; dim<num_dims; ++dim ) file << "velocity(" << dim << ") ";
  file << std::endl;
  // one row per cell, matching the header order above
  for ( auto c : mesh.cells() ) {
    file << c.id() << " " << cell_lid_to_gid.at(c.id()) << " ";
    for ( auto x : c->centroid() ) file << x << " ";
    file << d(c) << " " << e(c) << " " << p(c) << " ";
    for ( auto x : v(c) ) file << x << " ";
    file << std::endl;
  }
  file << "# END CELLS" << std::endl;
  // Dump vertex quantities (coordinates only)
  file << "# BEGIN VERTICES" << std::endl;
  file << "# Total number: " << mesh.num_vertices() << std::endl;
  file << "# local_id global_id ";
  for ( int dim=0; dim<num_dims; ++dim ) file << "coordinate(" << dim << ") ";
  file << std::endl;
  for ( auto v : mesh.vertices() ) {
    file << v.id() << " " << vert_lid_to_gid.at(v.id()) << " ";
    for ( auto x : v->coordinates() ) file << x << " ";
    file << std::endl;
  }
  file << "# END VERTICES" << std::endl;
  // close file
  file.close();
}
////////////////////////////////////////////////////////////////////////////////
// TASK REGISTRATION
////////////////////////////////////////////////////////////////////////////////
// Register every task defined above with the FleCSI runtime as a leaf task
// with index-launch semantics on "loc" processors.
flecsi_register_task(update_geometry, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(initial_conditions, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(initial_conditions_from_file, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(evaluate_time_step, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(evaluate_fluxes, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(apply_update, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(output, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(print, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(dump, apps::hydro, loc, index|flecsi::leaf);
} // namespace hydro
} // namespace apps
|
datascope_parallel.c | #include <stdio.h>
int static_data = 0;
// Data-scoping demonstration: variables declared outside the parallel
// region (i, dynamic_data, static_data) are shared by default, while
// `local` is private to each thread.
int main() {
    int i;
    int dynamic_data = 0;
    #pragma omp parallel
    {
        //start parallel
        int local = 0;  // private: declared inside the parallel region
        // NOTE(review): `i` is shared, so concurrent i++ from all threads
        // makes even the per-thread trip counts nondeterministic, and the
        // unsynchronized += on dynamic_data/static_data are data races.
        // This appears intentional for the demo -- confirm before "fixing".
        for (i=0; i<1000; i++) {
            local += 1;
            dynamic_data += 1;
            static_data += 1;
        }
        printf("localの値=%d\n", local);
    }
    //end parallel
    // the shared totals printed here vary from run to run due to the races
    printf("dynamic_dataの値=%d\n", dynamic_data);
    printf("static_dataの値=%d\n", static_data);
    return 0;
}
|
GB_binop__minus_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__minus_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__minus_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int64)
// A*D function (colscale): GB (_AxD__minus_int64)
// D*A function (rowscale): GB (_DxB__minus_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int64)
// C=scalar+B GB (_bind1st__minus_int64)
// C=scalar+B' GB (_bind1st_tran__minus_int64)
// C=A+scalar GB (_bind2nd__minus_int64)
// C=A'+scalar GB (_bind2nd_tran__minus_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij - bij)
// Implementation macros consumed by the shared templates included by the
// kernels below (auto-generated; the operator here is int64 MINUS).
#define GB_ATYPE \
    int64_t
#define GB_BTYPE \
    int64_t
#define GB_CTYPE \
    int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
// access the numeric value array of C
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT64 || GxB_NO_MINUS_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // All three matrices are dense; the shared template applies GB_BINOP
    // (z = x - y) entrywise using nthreads.  NOTE(review): unlike the other
    // kernels in this file there is no GB_DISABLE guard here -- presumably
    // the caller checks it; confirm.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C = A-B entrywise with all three matrices dense.
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // C += B with C dense and B sparse; the work is partitioned over the
        // precomputed B_ek_slicing tasks.
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__minus_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above already returned); emitted by the
    // code generator and harmless
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Column scale C = A*D with D diagonal; the template writes Cx directly.
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Row scale C = D*B with D diagonal; the template writes Cx directly.
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__minus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseAdd C = A+B, or C<M> = A+B when a mask is given.  The ek-slicing
    // workspaces are declared for the template and released by GB_FREE_WORK.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__minus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult C = A.*B, or C<M> = A.*B when a mask is given.
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// MINUS is not commutative, so when flipxy is true the template is
// instantiated with GB_FLIPPED=1 to compute fmult(y,x) instead of fmult(x,y).
GrB_Info GB (_AemultB_02__minus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; the mask M drives the iteration (see M_ek_slicing).
GrB_Info GB (_AemultB_03__minus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B,
// with z = x - y (int64_t MINUS); method selection is via ewise_method.
GrB_Info GB (_AemultB_bitmap__minus_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p] for p = 0..bnz-1: apply MINUS with the scalar x
// bound to the first operand.  Bb is B's bitmap (NULL if B is not bitmap).
GrB_Info GB (_bind1st__minus_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments (no restrict: Cx may alias Bx)
int64_t *out = (int64_t *) Cx_output ;
const int64_t *bvals = (const int64_t *) Bx_input ;
// the bound scalar
int64_t xscalar = (*((const int64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only operate on entries present in the bitmap (GBB is true if none)
if (GBB (Bb, p))
{
int64_t bij = GBX (bvals, p, false) ;
out [p] = (xscalar - bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y for p = 0..anz-1: apply MINUS with the scalar y
// bound to the second operand.  Ab is A's bitmap (NULL if A is not bitmap).
GrB_Info GB (_bind2nd__minus_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// typed views of the untyped arguments (no restrict: Cx may alias Ax)
int64_t *out = (int64_t *) Cx_output ;
const int64_t *avals = (const int64_t *) Ax_input ;
// the bound scalar
int64_t yscalar = (*((const int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only operate on entries present in the bitmap (GBB is true if none)
if (GBB (Ab, p))
{
int64_t aij = GBX (avals, p, false) ;
out [p] = (aij - yscalar) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c for each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply z = x - aij with x bound first.
GrB_Info GB (_bind1st_tran__minus_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): auto-generated restore of GB_ATYPE; redundant here since
// the redefinition is identical to the one above.
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c for each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply z = aij - y with y bound second.
GrB_Info GB (_bind2nd_tran__minus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
csr.c |
/*
Author: Mohammed Ahmed Al Farhan
Email: mohammed.farhan@kaust.edu.sa
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <omp.h>
#include "inc/allocator.h"
#include "inc/geometry.h"
#include "inc/msh/mesh.h"
/* qsort comparator for uint32_t values: returns <0, 0, or >0 as *a is
 * less than, equal to, or greater than *b.
 * Bug fix: the previous implementation returned the int cast of the
 * unsigned subtraction (*a - *b), which wraps when the true difference
 * exceeds INT_MAX (e.g. 0 vs 0x80000001u compared as "greater"),
 * producing an incorrect sort order for large node indices. */
static inline int
comp(const void *restrict a, const void *restrict b)
{
  uint32_t x = *((const uint32_t *) a);
  uint32_t y = *((const uint32_t *) b);
  /* branch-free three-way compare, immune to overflow */
  return (x > y) - (x < y);
}
/* Build the CSR (compressed sparse row) structure of the mesh graph in g:
 * one row/column per mesh node, one nonzero block per node pair connected
 * by an edge, plus a diagonal block per node.  Fills g->c (ia, ja, aa,
 * nnz, bsz, sz).  Values in aa are zero-initialized; only the pattern is
 * computed here. */
void
m2csr(struct geometry *restrict g)
{
/* Row pointers */
uint32_t *restrict ia;
#if defined(__USE_MEMKIND) && defined(__USE_POSIX_HBW)
/* NOTE(review): the MEMKIND/HBW branch uses plain calloc while the other
 * branch uses the project's kcalloc; this looks inverted -- confirm. */
ia = calloc((g->n->sz + 1), sizeof(uint32_t));
#else
kcalloc((g->n->sz+1), sizeof(uint32_t), (void *) &ia);
#endif
uint32_t i;
/* Count entries per row: each edge (n0,n1) adds one off-diagonal entry
 * to row n0 and one to row n1 (counts stored shifted by one slot). */
for(i = 0; i < g->e->sz; i++)
{
ia[g->e->eptr->n0[i]+1]++;
ia[g->e->eptr->n1[i]+1]++;
}
ia[0] = 1;
/* Prefix-sum the counts; the extra ia[i]++ reserves one diagonal entry
 * per row. */
for(i = 1; i <= g->n->sz; i++)
{
ia[i] += ia[i-1];
ia[i]++;
}
/* Adjust the IA array to Zero-index (c-style) */
for(i = 0; i <= g->n->sz; i++) ia[i]--;
/* Column indices; total entry count is ia[g->n->sz] */
uint32_t *restrict ja;
#if defined(__USE_MEMKIND) && defined(__USE_POSIX_HBW)
ja = calloc(ia[g->n->sz], sizeof(uint32_t));
#else
kmalloc(ia[g->n->sz], sizeof(uint32_t), (void *) &ja);
#endif
/* Block values, 4x4 doubles per nonzero block, zero-initialized.
 * NOTE(review): allocation is unchecked and hard-codes the 4x4 block
 * size even though bsz is 5 under __USE_COMPRESSIBLE_FLOW -- confirm. */
double *aa = (double *) calloc(ia[g->n->sz] * 4 * 4, sizeof(double));
/* A temp buffer used to keep track of how many entries have been
 * filled in each row so far */
uint32_t *restrict buf;
kmalloc(g->n->sz, sizeof(uint32_t), (void *) &buf);
/* Column Index of the diagonal elements */
for(i = 0; i < g->n->sz; i++)
{
ja[ia[i]] = i; // A diagonal element
buf[i] = 1; // One element in this row has been added
}
/* Fill the rest of the array, ordered by RCM and using a
 * modified version of Breadth-First Search traversing algorithm */
for(i = 0; i < g->e->sz; i++)
{
uint32_t n0 = g->e->eptr->n0[i];
uint32_t n1 = g->e->eptr->n1[i];
/* Get the element index in the row
 * The index is basically the row index plus the last element that
 * has been added in the row. */
uint32_t indx = ia[n0] + buf[n0]; // Get the index
buf[n0]++; // Column has been added (one more element in the row)
ja[indx] = n1; // Store the node index in its corresponding index
/* Do it for the other endpoint */
indx = ia[n1] + buf[n1];
buf[n1]++;
ja[indx] = n0;
}
kfree(buf);
// Number of nonzero block per row
// uint32_t *restrict nnz;
// kmalloc(g->n->sz, sizeof(uint32_t), (void *) &nnz);
size_t nz_total = 0;
/* Sort each row of the ja array into increasing column order.
 * (Reordering here guarantees that within every row the node indices
 * are ascending.) */
#pragma omp parallel for reduction(+:nz_total)
for(i = 0; i < g->n->sz; i++)
{
uint32_t jstart = ia[i];
uint32_t jend = ia[i+1];
/* Qsort to sort the JA array */
uint32_t * l = ja + jstart; // Low address
uint32_t * h = ja + jend; // High address
size_t sz = h - l;
qsort(l, sz, sizeof(uint32_t), comp);
uint32_t nz = 0;
uint32_t j;
/* NOTE(review): this loop just computes jend - jstart; kept as-is. */
for(j = jstart; j < jend; j++) nz++;
// nnz[i] = nz;
nz_total += nz;
}
g->c->aa = aa;
g->c->ia = ia; // Starting row indices
g->c->ja = ja; // Column indices
g->c->nnz = nz_total; // Number of nonzero blocks
#ifdef __USE_COMPRESSIBLE_FLOW
/* Compressible Euler flow */
g->c->bsz = 5; // 5 unknowns per grid point
#else
/* Incompressible Euler flow */
g->c->bsz = 4; // 4 unknowns per grid point
g->c->bsz2 = 4*4; // block size squared (4x4 entries per block)
#endif
/* Per-row entry counts, zero-initialized; filled elsewhere. */
int *ilen = (int *) calloc(g->n->sz, sizeof(int));
g->c->ailen = ilen;
/* Number of the matrix rows | columns */
g->c->sz = g->c->bsz * g->n->sz;
}
|
example1.c | // calculation example using a datafile outputed by 'mie_ms_solver'.
// verification of radiation force, radiation torque and absorbed energy.
#include "emf_mie_ms.h"
// radiation force and torque calculated by surface integral of maxwell stress tensor
int force_torque_integral(int i,double *vf,double *vn,MSPD *msp);
// absorbed energy calculated by surface integral of poynting vector.
int absorb_energy_poynting(int i,double *P,MSPD *msp);
// absorbed energy calculated by volume integral of loss by Joulian heat.
int absorb_energy_joulian(int i,double *P,MSPD *msp);
// Entry point: reads a 'mie_ms_solver' datafile named on the command line,
// prints the total electromagnetic field at a sample point, then the
// radiation force/torque and absorbed energy per sphere, each verified by
// an independent surface/volume integral.
// Fix: argv[1] was dereferenced without checking argc, crashing when the
// program is run with no argument; a usage message is printed instead.
int main(int argc,char *argv[])
{
MSPD msp;
double complex e[3],h[3],cet,fv;
double vf[3],vn[3],r[3],t,p;
int i;
// require the datafile argument before touching argv[1]
if(argc<2){
fprintf(stderr,"Usage : %s datafile_name\n",argv[0]);
return 1;
}
read_dat_ms(argv[1],&msp); // read data file
print_data_ms(&msp); // print data
print_data_ms_mksa(&msp); // print data in MKSA system of units
// sample point for the field evaluation
r[0]= 0.0; // set x-coordinate
r[1]= 0.0; // set y-coordinate
r[2]=-1.5; // set z-coordinate
total_EH_ms(e,h,r,&msp); // calculation of total field ( add incident field to scattered field )
printf("Electromagnetic field at r=( % g,% g,% g )\n",r[0],r[1],r[2]);
fv=OSUtoMKSA_ElectricField(e[0]);
printf("Ex = % 15.14e %+15.14e I (=% 15.14e %+15.14e I [V/m](MKSA))\n",creal(e[0]),cimag(e[0]),creal(fv),cimag(fv));
fv=OSUtoMKSA_ElectricField(e[1]);
printf("Ey = % 15.14e %+15.14e I (=% 15.14e %+15.14e I [V/m](MKSA))\n",creal(e[1]),cimag(e[1]),creal(fv),cimag(fv));
fv=OSUtoMKSA_ElectricField(e[2]);
printf("Ez = % 15.14e %+15.14e I (=% 15.14e %+15.14e I [V/m](MKSA))\n",creal(e[2]),cimag(e[2]),creal(fv),cimag(fv));
fv=OSUtoMKSA_MagneticField(h[0]);
printf("Hx = % 15.14e %+15.14e I (=% 15.14e %+15.14e I [A/m](MKSA))\n",creal(h[0]),cimag(h[0]),creal(fv),cimag(fv));
fv=OSUtoMKSA_MagneticField(h[1]);
printf("Hy = % 15.14e %+15.14e I (=% 15.14e %+15.14e I [A/m](MKSA))\n",creal(h[1]),cimag(h[1]),creal(fv),cimag(fv));
fv=OSUtoMKSA_MagneticField(h[2]);
printf("Hz = % 15.14e %+15.14e I (=% 15.14e %+15.14e I [A/m](MKSA))\n",creal(h[2]),cimag(h[2]),creal(fv),cimag(fv));
// instantaneous (real) field at time t, via the e^{-i omega t} factor
t=1.0e-3; // set time
cet=cexp(-I*msp.bm.omega*t);
printf("Real electromagnetic field at t=%g\n",t);
fv=OSUtoMKSA_ElectricField(e[0]*cet);
printf("Ex = % 15.14e (=% 15.14e [V/m](MKSA))\n",creal(e[0]*cet),creal(fv));
fv=OSUtoMKSA_ElectricField(e[1]*cet);
printf("Ey = % 15.14e (=% 15.14e [V/m](MKSA))\n",creal(e[1]*cet),creal(fv));
fv=OSUtoMKSA_ElectricField(e[2]*cet);
printf("Ez = % 15.14e (=% 15.14e [V/m](MKSA))\n",creal(e[2]*cet),creal(fv));
fv=OSUtoMKSA_MagneticField(h[0]*cet);
printf("Hx = % 15.14e (=% 15.14e [A/m](MKSA))\n",creal(h[0]*cet),creal(fv));
fv=OSUtoMKSA_MagneticField(h[1]*cet);
printf("Hy = % 15.14e (=% 15.14e [A/m](MKSA))\n",creal(h[1]*cet),creal(fv));
fv=OSUtoMKSA_MagneticField(h[2]*cet);
// fix: unit tag previously misspelled "(MSKA)"
printf("Hz = % 15.14e (=% 15.14e [A/m](MKSA))\n",creal(h[2]*cet),creal(fv));
printf("\n");
printf("Radiation force and torque\n");
for(i=0;i<msp.n_sphr;i++){
force_torque_ms(i,vf,vn,&msp);
printf("Mie coefficients\n");
printf("sphere id %2d, F=( % 15.14g,% 15.14g,% 15.14g )\n",i,vf[0],vf[1],vf[2]);
printf(" %2d, N=( % 15.14g,% 15.14g,% 15.14g )\n",i,vn[0],vn[1],vn[2]);
printf(" %2d, F=( % 15.14g,% 15.14g,% 15.14g ) [ N ](MKSA)\n",i,OSUtoMKSA_Force(vf[0]),OSUtoMKSA_Force(vf[1]),OSUtoMKSA_Force(vf[2]));
printf(" %2d, N=( % 15.14g,% 15.14g,% 15.14g ) [N m](MKSA)\n",i,OSUtoMKSA_Torque(vn[0]),OSUtoMKSA_Torque(vn[1]),OSUtoMKSA_Torque(vn[2]));
if(force_torque_integral(i,vf,vn,&msp)){ // for verification
printf("Surface integral of maxwell stress tensor (verification)\n");
printf("sphere id %2d, F=( % 15.14g,% 15.14g,% 15.14g )\n",i,vf[0],vf[1],vf[2]);
printf(" %2d, N=( % 15.14g,% 15.14g,% 15.14g )\n",i,vn[0],vn[1],vn[2]);
printf(" %2d, F=( % 15.14g,% 15.14g,% 15.14g ) [ N ](MKSA)\n",i,OSUtoMKSA_Force(vf[0]),OSUtoMKSA_Force(vf[1]),OSUtoMKSA_Force(vf[2]));
printf(" %2d, N=( % 15.14g,% 15.14g,% 15.14g ) [N m](MKSA)\n",i,OSUtoMKSA_Torque(vn[0]),OSUtoMKSA_Torque(vn[1]),OSUtoMKSA_Torque(vn[2]));
}
}
printf("\nAbsorbed energy\n");
for(i=0;i<msp.n_sphr;i++){
absorbed_energy_ms(i,&p,&msp);
printf("Mie coefficients\n");
printf("sphere id %2d, P=% 15.14g\n",i,p);
printf(" %2d, P=% 15.14g [W]\n",i,OSUtoMKSA_power(p));
if(absorb_energy_poynting(i,&p,&msp)){ // for verification
printf("Surface integral of Poynting vector (verification)\n");
printf("sphere id %2d, P=% 15.14g\n",i,p);
printf(" %2d, P=% 15.14g [W]\n",i,OSUtoMKSA_power(p));
}
if(absorb_energy_joulian(i,&p,&msp)){ // for verification
printf("Volume integral of loss by Joulian heat (verification)\n");
printf("sphere id %2d, P=% 15.14g\n",i,p);
printf(" %2d, P=% 15.14g [W]\n",i,OSUtoMKSA_power(p));
}
}
free_ms(&msp); // free allocated memory
return 0;
}
// Verification routine: radiation force vf[3] and torque vn[3] on sphere
// 'id' by Gauss-Legendre surface integration of the time-averaged Maxwell
// stress tensor over a sphere of radius 2a centered on the particle.
// Returns 1 on success, 0 if not applicable (more than one sphere).
int force_torque_integral(int id,double *vf,double *vn,MSPD *msp)
{
const int nc=80;
// Gauss-Legendre nodes/weights: nc points in theta, 2*nc in phi
double xt[nc],wt[nc],xp[2*nc],wp[2*nc];
double complex e[3],h[3];
double rc,r[3],Tx[3],Ty[3],Tz[3],sin_t,cos_t,sin_p,cos_p,eps,mu,aex2,aey2,aez2,ahx2,ahy2,ahz2,ne2,nh2;
double tfx,tfy,tfz,tnx,tny,tnz;
int i,j;
vf[0]=0.0; vf[1]=0.0; vf[2]=0.0;
vn[0]=0.0; vn[1]=0.0; vn[2]=0.0;
// the integration sphere must not intersect any other particle
if(msp->n_sphr!=1){
//printf("this code can analyze a single sphere only. Return...\n");
return 0;
}
// surrounding medium: eps = n_0^2, mu = 1 (non-magnetic)
eps=msp->bm.n_0*msp->bm.n_0;
mu=1.0;
gauleg(0.0,M_PI,xt,wt,nc);
gauleg(0.0,2.0*M_PI,xp,wp,nc*2);
// integration radius: twice the sphere radius
rc=msp->sp[id].a*2.0;
for(i=0;i<2*nc;i++){ // phi 0 to 2pi
sin_p=sin(xp[i]); cos_p=cos(xp[i]);
// per-phi partial sums over theta
tfx=0.0; tfy=0.0; tfz=0.0;
tnx=0.0; tny=0.0; tnz=0.0;
for(j=0;j<nc;j++){ // theta 0 to pi
sin_t=sin(xt[j]); cos_t=cos(xt[j]);
// quadrature point on the integration sphere, particle-centered
r[0]=rc*sin_t*cos_p+msp->sp[id].xs;
r[1]=rc*sin_t*sin_p+msp->sp[id].ys;
r[2]=rc*cos_t +msp->sp[id].zs;
total_EH_ms(e,h,r,msp);
// |E| and |H| component magnitudes squared
aex2=creal(e[0]*conj(e[0])); aey2=creal(e[1]*conj(e[1])); aez2=creal(e[2]*conj(e[2]));
ahx2=creal(h[0]*conj(h[0])); ahy2=creal(h[1]*conj(h[1])); ahz2=creal(h[2]*conj(h[2]));
ne2=aex2+aey2+aez2; nh2=ahx2+ahy2+ahz2;
// time-averaged maxwell stress tensor rows Tx, Ty, Tz
Tx[0]=0.5*(eps*aex2+mu*ahx2)-0.25*(eps*ne2+mu*nh2);
Tx[1]=0.5*(eps*creal(e[0]*conj(e[1]))+mu*creal(h[0]*conj(h[1])));
Tx[2]=0.5*(eps*creal(e[0]*conj(e[2]))+mu*creal(h[0]*conj(h[2])));
Ty[0]=0.5*(eps*creal(e[1]*conj(e[0]))+mu*creal(h[1]*conj(h[0])));
Ty[1]=0.5*(eps*aey2+mu*ahy2)-0.25*(eps*ne2+mu*nh2);
Ty[2]=0.5*(eps*creal(e[1]*conj(e[2]))+mu*creal(h[1]*conj(h[2])));
Tz[0]=0.5*(eps*creal(e[2]*conj(e[0]))+mu*creal(h[2]*conj(h[0])));
Tz[1]=0.5*(eps*creal(e[2]*conj(e[1]))+mu*creal(h[2]*conj(h[1])));
Tz[2]=0.5*(eps*aez2+mu*ahz2)-0.25*(eps*ne2+mu*nh2);
// force: (T . n) dA, with dA = rc^2 sin_t dtheta dphi
tfx+=(Tx[0]*sin_t*cos_p+Tx[1]*sin_t*sin_p+Tx[2]*cos_t)*rc*rc*sin_t*wt[j];
tfy+=(Ty[0]*sin_t*cos_p+Ty[1]*sin_t*sin_p+Ty[2]*cos_t)*rc*rc*sin_t*wt[j];
tfz+=(Tz[0]*sin_t*cos_p+Tz[1]*sin_t*sin_p+Tz[2]*cos_t)*rc*rc*sin_t*wt[j];
// torque: r x (T . n) dA, extra factor rc from the lever arm
tnx+=( (Tz[0]*sin_t*cos_p+Tz[1]*sin_t*sin_p+Tz[2]*cos_t)*sin_t*sin_p
-(Ty[0]*sin_t*cos_p+Ty[1]*sin_t*sin_p+Ty[2]*cos_t)*cos_t)*rc*rc*rc*sin_t*wt[j];
tny+=( (Tx[0]*sin_t*cos_p+Tx[1]*sin_t*sin_p+Tx[2]*cos_t)*cos_t
-(Tz[0]*sin_t*cos_p+Tz[1]*sin_t*sin_p+Tz[2]*cos_t)*sin_t*cos_p)*rc*rc*rc*sin_t*wt[j];
tnz+=( (Ty[0]*sin_t*cos_p+Ty[1]*sin_t*sin_p+Ty[2]*cos_t)*sin_t*cos_p
-(Tx[0]*sin_t*cos_p+Tx[1]*sin_t*sin_p+Tx[2]*cos_t)*sin_t*sin_p)*rc*rc*rc*sin_t*wt[j];
}
// accumulate the phi quadrature
vf[0]+=tfx*wp[i];
vf[1]+=tfy*wp[i];
vf[2]+=tfz*wp[i];
vn[0]+=tnx*wp[i];
vn[1]+=tny*wp[i];
vn[2]+=tnz*wp[i];
}
return 1;
}
// Verification routine: absorbed power of sphere 'id' by Gauss-Legendre
// surface integration of the time-averaged Poynting vector over a sphere
// of radius 2a around the particle (inward net flux = absorption).
// Returns 1 on success, 0 if not applicable (more than one sphere).
int absorb_energy_poynting(int id,double *P,MSPD *msp)
{
const int nc=80;
// quadrature nodes/weights: nc points in theta, 2*nc in phi
double xt[nc],wt[nc],xp[2*nc],wp[2*nc];
double complex e[3],h[3];
double rs,r[3],nv[3],sv[3],st,ct,sp,cp;
double phi_sum;
int ip,it;
*P=0.0;
// the integration sphere must enclose exactly this one particle
if(msp->n_sphr!=1) return 0;
gauleg(0.0,M_PI,xt,wt,nc);
gauleg(0.0,2.0*M_PI,xp,wp,nc*2);
rs=msp->sp[id].a*2.0; // integration radius: twice the sphere radius
for(ip=0;ip<2*nc;ip++){ // phi 0 to 2pi
sp=sin(xp[ip]); cp=cos(xp[ip]);
phi_sum=0.0; // partial sum over theta for this phi
for(it=0;it<nc;it++){ // theta 0 to pi
st=sin(xt[it]); ct=cos(xt[it]);
// outward unit normal and quadrature point (particle-centered)
nv[0]=st*cp;
nv[1]=st*sp;
nv[2]=ct;
r[0]=rs*nv[0]+msp->sp[id].xs;
r[1]=rs*nv[1]+msp->sp[id].ys;
r[2]=rs*nv[2]+msp->sp[id].zs;
total_EH_ms(e,h,r,msp);
// Re(E x H*): twice the time-averaged Poynting vector
sv[0]=creal(e[1]*conj(h[2])-e[2]*conj(h[1]));
sv[1]=creal(e[2]*conj(h[0])-e[0]*conj(h[2]));
sv[2]=creal(e[0]*conj(h[1])-e[1]*conj(h[0]));
// S . n dA, with dA = rs^2 sin(theta) dtheta dphi
phi_sum+=(nv[0]*sv[0]+nv[1]*sv[1]+nv[2]*sv[2])*rs*rs*st*wt[it];
}
*P+=phi_sum*wp[ip];
}
// factor 1/2 for time averaging, minus sign: inward flux is absorption
*P*=-0.5;
return 1;
}
// Verification routine: absorbed power of sphere 'id' by volume integration
// of the Joule heating density (proportional to |E|^2) over the sphere
// interior, using Gauss-Legendre quadrature in r, theta, and phi.
// The phi loop is parallelized with OpenMP. Always returns 1.
int absorb_energy_joulian(int id,double *P,MSPD *msp)
{
const int nc=60;
// quadrature nodes/weights: theta (nc), phi (2*nc), radius (nc)
double xt[nc],wt[nc],xp[2*nc],wp[2*nc],xa[nc],wa[nc];
double complex e[3],h[3];
double r[3],n[3],sin_t,cos_t,sin_p,cos_p;
double tp,tpa;
int i,j,k;
*P=0.0;
gauleg(0.0,M_PI,xt,wt,nc);
gauleg(0.0,2.0*M_PI,xp,wp,nc*2);
gauleg(0.0,msp->sp[id].a,xa,wa,nc); // radial nodes from 0 to sphere radius a
for(k=0;k<nc;k++){
tpa=0.0; // shell partial sum at radius xa[k]
// each thread gets private angle/field temporaries; tpa is reduced
#pragma omp parallel for schedule(dynamic) private(sin_p,cos_p,tp,j,sin_t,cos_t,n,r,e,h) reduction(+:tpa)
for(i=0;i<2*nc;i++){ // phi 0 to 2pi
sin_p=sin(xp[i]); cos_p=cos(xp[i]);
tp=0.0;
for(j=0;j<nc;j++){ // theta 0 to pi
sin_t=sin(xt[j]); cos_t=cos(xt[j]);
// radial unit vector and quadrature point inside the sphere
n[0]=sin_t*cos_p;
n[1]=sin_t*sin_p;
n[2]=cos_t;
r[0]=xa[k]*n[0]+msp->sp[id].xs;
r[1]=xa[k]*n[1]+msp->sp[id].ys;
r[2]=xa[k]*n[2]+msp->sp[id].zs;
total_EH_ms(e,h,r,msp);
// |E|^2 times the volume element r^2 sin(theta)
tp+=creal(e[0]*conj(e[0])+e[1]*conj(e[1])+e[2]*conj(e[2]))*xa[k]*xa[k]*sin_t*wt[j];
}
tpa+=tp*wp[i];
}
*P+=tpa*wa[k];
}
*P*=msp->bm.omega*creal(msp->sp[id].ns)*cimag(msp->sp[id].ns); // P = P * 1/2 * conductivity
return 1;
}
|
least_squares_omp.h | /*
The following struct is optimized for cache-efficient parallel
calculation using OpenMP, ensuring there are no strides in
addressing memory by scheduling with chunk size 1.
*/
// Accumulates one chunk of reflections into the least-squares normal
// equations, parallelized over reflections with OpenMP.
// NOTE(review): FloatType and build_design_matrix are not declared here;
// presumably they come from an enclosing template scope -- confirm.
template < class NormalEquations,
template<typename> class WeightingScheme,
class OneMillerIndexFcalc>
struct accumulate_reflection_chunk_omp {
// first exception caught inside operator() (worker errors are deferred)
boost::scoped_ptr<smtbx::error> exception_;
boost::shared_ptr<NormalEquations> normal_equations_ptr;
NormalEquations& normal_equations;
// observed reflections (may carry twin components)
cctbx::xray::observations<FloatType> const& reflections;
// per-reflection solvent-mask contribution; empty if unused
af::const_ref<std::complex<FloatType> > const& f_mask;
WeightingScheme<FloatType> const& weighting_scheme;
boost::optional<FloatType> scale_factor;
// prototype F-calc functor; forked once per thread in operator()
boost::shared_ptr<OneMillerIndexFcalc> f_calc_function_ptr;
OneMillerIndexFcalc& f_calc_function;
scitbx::sparse::matrix<FloatType> const
& jacobian_transpose_matching_grad_fc;
// extinction correction; may also contribute a gradient term
cctbx::xray::extinction_correction<FloatType> const& exti;
bool objective_only, compute_grad;
// per-reflection outputs
af::ref<std::complex<FloatType> > f_calc;
af::ref<FloatType> observables;
af::ref<FloatType> weights;
af::versa<FloatType, af::c_grid<2> >& design_matrix;
// Constructor: binds all inputs/outputs by reference; compute_grad is
// simply the negation of objective_only.
accumulate_reflection_chunk_omp(
boost::shared_ptr<NormalEquations> const& normal_equations_ptr,
cctbx::xray::observations<FloatType> const& reflections,
af::const_ref<std::complex<FloatType> > const& f_mask,
WeightingScheme<FloatType> const& weighting_scheme,
boost::optional<FloatType> scale_factor,
boost::shared_ptr<OneMillerIndexFcalc> f_calc_function_ptr,
scitbx::sparse::matrix<FloatType> const
& jacobian_transpose_matching_grad_fc,
cctbx::xray::extinction_correction<FloatType> const& exti,
bool objective_only,
af::ref<std::complex<FloatType> > f_calc,
af::ref<FloatType> observables,
af::ref<FloatType> weights,
af::versa<FloatType, af::c_grid<2> >& design_matrix)
: normal_equations_ptr(normal_equations_ptr), normal_equations(*normal_equations_ptr),
reflections(reflections), f_mask(f_mask), weighting_scheme(weighting_scheme),
scale_factor(scale_factor),
f_calc_function_ptr(f_calc_function_ptr), f_calc_function(*f_calc_function_ptr),
jacobian_transpose_matching_grad_fc(jacobian_transpose_matching_grad_fc),
exti(exti),
objective_only(objective_only), compute_grad(!objective_only),
f_calc(f_calc), observables(observables), weights(weights),
design_matrix(design_matrix)
{}
// Main worker: computes F-calc, observables, weights, and (optionally)
// gradients for every reflection, then feeds them into the normal
// equations.  Exceptions are captured into exception_ rather than thrown
// across the OpenMP region.
void operator()() {
try {
const int n = reflections.size();
const int n_rows = jacobian_transpose_matching_grad_fc.n_rows();
const int threads = get_available_threads();
std::vector <std::vector<FloatType> > gradients;
// one forked F-calc functor per thread (fork() presumably gives an
// independent, thread-safe copy -- confirm)
// NOTE(review): boost::ptr_vector of boost::shared_ptr is unusual;
// a plain std::vector<boost::shared_ptr<...> > would be expected here.
boost::ptr_vector<boost::shared_ptr<OneMillerIndexFcalc> > f_calc_threads;
f_calc_threads.resize(threads);
for (int i = 0; i < threads; i++) {
f_calc_threads[i] = f_calc_function.fork();
}
if (compute_grad) {
gradients.resize(n);
}
#pragma omp parallel num_threads(threads)
{
// Make a gradient vector for each thread
af::shared<FloatType> gradient(n_rows);
const int thread = omp_get_thread_num();
// chunk size 1 so consecutive reflections go to different threads
#pragma omp for schedule(static,1)
for (int i_h = 0; i_h < n; ++i_h) {
miller::index<> const& h = reflections.index(i_h);
if (f_mask.size()) {
f_calc_threads[thread]->compute(h, f_mask[i_h], compute_grad);
}
else {
f_calc_threads[thread]->compute(h, boost::none, compute_grad);
}
f_calc[i_h] = f_calc_threads[thread]->f_calc;
FloatType observable;
//skip hoarding memory if Gradients are not needed.
if (compute_grad) {
// chain rule: map d(observable)/d(Fc params) onto the refined
// parameters via the transposed Jacobian
gradient = jacobian_transpose_matching_grad_fc *
f_calc_threads[thread]->grad_observable;
// sort out twinning
observable = process_twinning_with_grads(i_h,
gradient, f_calc_threads[thread]);
gradients[i_h].resize(n_rows);
for (int g = 0; g < gradient.size(); g++) {
gradients[i_h][g] = gradient[g];
}
}
else {
// sort out twinning
observable = process_twinning(i_h, f_calc_threads[thread]);
}
// apply extinction correction factor (and its gradient, below)
af::tiny<FloatType, 2> exti_k = exti.compute(h,
observable, compute_grad);
observable *= exti_k[0];
f_calc[i_h] *= std::sqrt(exti_k[0]);
observables[i_h] = observable;
FloatType weight = weighting_scheme(reflections.fo_sq(i_h),
reflections.sig(i_h), observable, scale_factor);
weights[i_h] = weight;
if (exti.grad_value()) {
int grad_index = exti.get_grad_index();
// NOTE(review): bound is checked against gradients.size() (== n,
// the reflection count) rather than n_rows, and gradients[i_h]
// is empty when !compute_grad -- confirm intended.
SMTBX_ASSERT(!(grad_index < 0
|| grad_index >= gradients.size()));
gradients[i_h][grad_index] += exti_k[1];
}
if (build_design_matrix) {
for (int i_g = 0; i_g < gradients[i_h].size(); i_g++) {
design_matrix(i_h, i_g) = gradients[i_h][i_g];
}
}
}
}
// single-threaded: fold the per-reflection results into the normal
// equations
if (objective_only) {
if (weights.size()) {
normal_equations.add_residuals_omp(n, observables,
reflections.data().ref(), weights);
}
else {
normal_equations.add_residuals_omp(n, observables,
reflections.data().ref());
}
}
else {
normal_equations.add_equations_omp(n, n_rows, threads,
observables, gradients, reflections.data().ref(), weights);
}
}
catch (smtbx::error const& e) {
exception_.reset(new smtbx::error(e));
}
catch (std::exception const& e) {
exception_.reset(new smtbx::error(e.what()));
}
}
// Combines the primary observable with all twin-component contributions
// for reflection i_h, updating 'gradients' in place (including the twin
// fraction derivatives).  Returns the combined observable.
FloatType process_twinning_with_grads(int i_h,
af::shared<FloatType>& gradients,
boost::shared_ptr<OneMillerIndexFcalc> f_calc_thread) {
typedef typename cctbx::xray::observations<FloatType>::iterator itr_t;
typedef typename cctbx::xray::twin_fraction<FloatType> twf_t;
typedef typename cctbx::xray::observations<FloatType>::index_twin_component twc_t;
FloatType obs = f_calc_thread->observable;
if (reflections.has_twin_components()) {
itr_t itr = reflections.iterate(i_h);
FloatType measured_part = obs,
identity_part = 0,
obs_scale = reflections.scale(i_h);
obs *= obs_scale;
const twf_t* measured_fraction = reflections.fraction(i_h);
if (compute_grad) {
gradients *= obs_scale;
// no explicit fraction: the measured part belongs to the identity
// (prime) component
if (measured_fraction == 0) {
identity_part = measured_part;
}
}
std::size_t twc_cnt = 0;
// accumulate each twin component's observable and gradient
while (itr.has_next()) {
twc_t twc = itr.next();
f_calc_thread->compute(twc.h, boost::none, compute_grad);
obs += twc.scale() * f_calc_thread -> observable;
if (compute_grad) {
af::shared<FloatType> tmp_gradients =
jacobian_transpose_matching_grad_fc * f_calc_thread->grad_observable;
gradients += twc.scale() * tmp_gradients;
if (twc.fraction != 0) {
// derivative of obs w.r.t. this component's twin fraction
if (twc.fraction->grad) {
SMTBX_ASSERT(!(twc.fraction->grad_index < 0 ||
twc.fraction->grad_index >= gradients.size()));
gradients[twc.fraction->grad_index] += f_calc_thread->observable;
}
}
else {
identity_part += f_calc_thread->observable;
}
twc_cnt++;
}
}
if (compute_grad) {
// consider multiple reflections with the 'prime' scale
itr.reset();
while (itr.has_next()) {
twc_t twc = itr.next();
if (twc.fraction != 0 && twc.fraction->grad) {
SMTBX_ASSERT(!(twc.fraction->grad_index < 0 ||
twc.fraction->grad_index >= gradients.size()));
gradients[twc.fraction->grad_index] -= identity_part;
}
}
if (twc_cnt != 0 && measured_fraction != 0 && measured_fraction->grad) {
SMTBX_ASSERT(!(measured_fraction->grad_index < 0 ||
measured_fraction->grad_index >= gradients.size()));
gradients[measured_fraction->grad_index] +=
measured_part - identity_part;
}
}
}
return obs;
}
// Gradient-free variant: combines the primary observable with all twin
// component observables for reflection i_h and returns the total.
FloatType process_twinning(int i_h,
boost::shared_ptr<OneMillerIndexFcalc> f_calc_thread) {
typedef typename cctbx::xray::observations<FloatType>::iterator itr_t;
typedef typename cctbx::xray::twin_fraction<FloatType> twf_t;
typedef typename cctbx::xray::observations<FloatType>::index_twin_component twc_t;
FloatType obs = f_calc_thread->observable;
if (reflections.has_twin_components()) {
itr_t itr = reflections.iterate(i_h);
// NOTE(review): measured_part, identity_part, measured_fraction and
// twc_cnt are unused in this gradient-free variant.
FloatType measured_part = obs,
identity_part = 0,
obs_scale = reflections.scale(i_h);
obs *= obs_scale;
const twf_t* measured_fraction = reflections.fraction(i_h);
std::size_t twc_cnt = 0;
while (itr.has_next()) {
twc_t twc = itr.next();
f_calc_thread->compute(twc.h, boost::none, compute_grad);
obs += twc.scale() * f_calc_thread->observable;
}
}
return obs;
}
};
GB_binop__isne_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isne_int16
// A.*B function (eWiseMult): GB_AemultB__isne_int16
// A*D function (colscale): GB_AxD__isne_int16
// D*A function (rowscale): GB_DxB__isne_int16
// C+=B function (dense accum): GB_Cdense_accumB__isne_int16
// C+=b function (dense accum): GB_Cdense_accumb__isne_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_int16
// C=scalar+B GB_bind1st__isne_int16
// C=scalar+B' GB_bind1st_tran__isne_int16
// C=A+scalar GB_bind2nd__isne_int16
// C=A'+scalar GB_bind2nd_tran__isne_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with z = (x != y)
// (int16_t ISNE); the loop body comes from the included template.
GrB_Info GB_Cdense_ewise3_noaccum__isne_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with
// C(i,j) = (C(i,j) != B(i,j)); B is sliced by entry into ntasks tasks.
GrB_Info GB_Cdense_accumB__isne_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// subassign method 23: C += B where C is dense and B is sparse
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C, with
// C(i,j) = (C(i,j) != b) for all entries (int16_t ISNE).
// Fix: the original had a return inside the scoped template block AND an
// identical, unreachable return after it; the duplicate is removed so the
// function matches the single-return shape of GB_Cdense_accumB above.
GrB_Info GB_Cdense_accumb__isne_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
// subassign method 22: C += b where C is dense and b is a scalar
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j), with
// z = (x != y) (int16_t ISNE); A is sliced by entry into ntasks tasks.
GrB_Info GB_AxD__isne_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array; the template fills Cx [p] = (aij != djj)
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i), with
// z = (x != y) (int16_t ISNE).
GrB_Info GB_DxB__isne_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array; the template fills Cx [p] = (dii != bij)
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// GB_AaddB: eWiseAdd, C = A+B or C<M> = A+B, with the ISNE int16_t operator
// applied where A and B intersect.  Auto-generated wrapper around the add
// template; the slice workspaces are allocated inside the template and freed
// by GB_FREE_ALL (defined just above this function).
GrB_Info GB_AaddB__isne_int16
(
// output matrix and its target sparsity format
GrB_Matrix C,
const int C_sparsity,
// optional mask M, with structural / complemented variants
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
// input matrices
const GrB_Matrix A,
const GrB_Matrix B,
// true if C's hyperlist equals M's hyperlist
const bool Ch_is_Mh,
// mappings from C's vectors to those of M, A, and B
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
// parallel task schedule
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this type/operator combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// slice workspaces, allocated by the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// GB_AemultB: eWiseMult, C = A.*B or C<M> = A.*B, with the ISNE int16_t
// operator applied on the intersection of A and B.  Auto-generated wrapper
// around the emult template; the slice workspaces are allocated inside the
// template and freed by GB_FREE_ALL.
GrB_Info GB_AemultB__isne_int16
(
// output matrix and its target sparsity format
GrB_Matrix C,
const int C_sparsity,
// optional mask M, with structural / complemented variants
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
// input matrices
const GrB_Matrix A,
const GrB_Matrix B,
// mappings from C's vectors to those of M, A, and B
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
// parallel task schedule
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this type/operator combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// slice workspaces, allocated by the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for all entries present in B: the scalar x is bound
// to the first argument of the ISNE operator.  Cx and Bx may be aliased, since
// each entry is read and then written at the same position p.
GrB_Info GB_bind1st__isne_int16
(
GB_void *Cx_output,         // output values; may be aliased with Bx_input
const GB_void *x_input,     // pointer to the bound scalar x (int16_t)
const GB_void *Bx_input,    // input values of B (int16_t)
const int8_t *GB_RESTRICT Bb,   // bitmap of B, or NULL if all entries present
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads
)
{
#if GB_DISABLE
// this type/operator combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped array arguments
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t *Bx = (const int16_t *) Bx_input ;
// the scalar bound as the first operand
int16_t x = (*((const int16_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // skip entries not present according to the bitmap Bb (if any)
    if (GBB (Bb, p))
    {
        Cx [p] = (x != Bx [p]) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for all entries present in A: the scalar y is bound
// to the second argument of the ISNE operator.  Cx and Ax may be aliased,
// since each entry is read and then written at the same position p.
GrB_Info GB_bind2nd__isne_int16
(
GB_void *Cx_output,         // output values; may be aliased with Ax_input
const GB_void *Ax_input,    // input values of A (int16_t)
const GB_void *y_input,     // pointer to the bound scalar y (int16_t)
const int8_t *GB_RESTRICT Ab,   // bitmap of A, or NULL if all entries present
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads
)
{
#if GB_DISABLE
// this type/operator combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped array arguments
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t *Ax = (const int16_t *) Ax_input ;
// the scalar bound as the second operand
int16_t y = (*((const int16_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // skip entries not present according to the bitmap Ab (if any)
    if (GBB (Ab, p))
    {
        Cx [p] = (Ax [p] != y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// GB_bind1st_tran: C = op (x, A'), transpose A and apply the ISNE operator
// with the scalar x bound to the first argument.  The element-wise work is
// the GB_CAST_OP macro defined just above; the traversal lives in the
// included transpose template.
GrB_Info GB_bind1st_tran__isne_int16
(
// output matrix
GrB_Matrix C,
// pointer to the bound scalar x (int16_t)
const GB_void *x_input,
// input matrix, transposed by the template
const GrB_Matrix A,
// per-workspace buffers and slice of A for parallel execution
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
// this type/operator combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code (generator artifact: it redefines
// the macro to the same type it already had)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// GB_bind2nd_tran: C = op (A', y), transpose A and apply the ISNE operator
// with the scalar y bound to the second argument.  The element-wise work is
// the GB_CAST_OP macro defined just above; the traversal lives in the
// included transpose template.
GrB_Info GB_bind2nd_tran__isne_int16
(
// output matrix
GrB_Matrix C,
// input matrix, transposed by the template
const GrB_Matrix A,
// pointer to the bound scalar y (int16_t)
const GB_void *y_input,
// per-workspace buffers and slice of A for parallel execution
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this type/operator combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ej2.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
#define TAM 40
// Fill the first TAM entries of M with the constant 5.0f.
void rellenarArray(float *M){
    for (int idx = 0; idx < TAM; ++idx) {
        M[idx] = 5.0f;
    }
}
// Compute c = a + b over TAM floats three times, once per OpenMP schedule
// (static, dynamic, guided), timing each run with omp_get_wtime.
// Returns 0 on success, EXIT_FAILURE if any allocation fails.
int main() {
    double start;int numthreads=4;
    float *a = (float *)malloc(sizeof(float)*TAM);
    float *b = (float *)malloc(sizeof(float)*TAM);
    float *c = (float *)malloc(sizeof(float)*TAM);
    // fix: the original never checked malloc nor freed these buffers (leak)
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "malloc failed\n");
        free(a); free(b); free(c);   // free(NULL) is a safe no-op
        return EXIT_FAILURE;
    }
    rellenarArray(a);rellenarArray(b);
    // static schedule: fixed contiguous chunks per thread
    start = omp_get_wtime();
    #pragma omp parallel for schedule(static) num_threads(numthreads)
    for(int i=0;i<TAM;++i)
        *(c+i)=*(a+i)+*(b+i);
    printf("\n-------------------------------------------\nTiempo de ejecucion del programa con %i hilos y schedule STATIC, %lfs\n-------------------------------------------\n",numthreads,omp_get_wtime()-start);
    // dynamic schedule: chunks handed out on demand
    start = omp_get_wtime();
    #pragma omp parallel for schedule(dynamic) num_threads(numthreads)
    for(int i=0;i<TAM;++i)
        *(c+i)=*(a+i)+*(b+i);
    printf("\n-------------------------------------------\nTiempo de ejecucion del programa con %i hilos y schedule DYNAMIC, %lfs\n-------------------------------------------\n",numthreads,omp_get_wtime()-start);
    // guided schedule: chunk size shrinks as iterations remain
    start = omp_get_wtime();
    #pragma omp parallel for schedule(guided) num_threads(numthreads)
    for(int i=0;i<TAM;++i)
        *(c+i)=*(a+i)+*(b+i);
    printf("\n-------------------------------------------\nTiempo de ejecucion del programa con %i hilos y schedule GUIDED, %lfs\n-------------------------------------------\n",numthreads,omp_get_wtime()-start);
    // release the three work arrays (was missing in the original)
    free(a); free(b); free(c);
    return 0;
}
|
GB_unop__identity_bool_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_int8
// op(A') function: GB_unop_tran__identity_bool_int8
// C type: bool
// A type: int8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with typecast: Cx [p] = (bool) Ax [p] for all
// anz entries.  Cx and Ax may be aliased, since each position is read and
// then written independently.
GrB_Info GB_unop_apply__identity_bool_int8
(
bool *Cx,               // Cx and Ax may be aliased
const int8_t *Ax,       // input values
int64_t anz,            // number of entries to process
int nthreads            // number of OpenMP threads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // cij = (bool) aij, cast and assign in one step
    Cx [p] = (bool) Ax [p] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_unop_tran: C = identity (cast (A')), transpose A while typecasting each
// int8_t entry to bool.  The per-entry work is the GB_CAST_OP macro defined
// earlier in this file; the traversal lives in the included template.
GrB_Info GB_unop_tran__identity_bool_int8
(
// output matrix (bool)
GrB_Matrix C,
// input matrix (int8_t), transposed by the template
const GrB_Matrix A,
// per-slice row counts and iterator state for the transpose
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
// slice of A per task, and the number of slices
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// run only phase 2 of the two-phase transpose
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fig4.61-atomic.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_num_threads() 1
#endif
/*
 * Demonstrate the OpenMP atomic construct: n iterations each atomically
 * increment a shared counter, so the printed result is always n.
 */
int main()
{
    int ic, n = 7;
#ifdef _OPENMP
    /* Pin the team size: turn off dynamic thread adjustment, warn if it is
       still enabled, then request exactly 3 threads. */
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
    (void) omp_set_num_threads(3);
#endif
    ic = 0;
#pragma omp parallel for shared(ic,n)
    for (int i = 0; i < n; i++)
    {
        /* atomic makes the read-modify-write race-free */
#pragma omp atomic
        ic += 1;
    }
    printf("Counter = %d\n",ic);
    return(0);
}
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state, this will put structures types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
/// \param AllowExtractorSinking Flag to include sinking instructions,
/// emitted by CodeExtractor, in the
/// outlined region. Default is false.
void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for sections construct as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of
// function_ref class - function_ref contains non-ownable reference
// to the callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g using uint8_t, its loop trip count of 256 cannot be
/// stored into an 8 bit integer):
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; --i)
///
//
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the
/// iterations.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down. \param IsSigned Whether Start, Stop
/// and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iteration,
/// which cannot be represented in an 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instruction that compute the collapsed
/// trip count. If not set, defaults to before the generated
/// loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param Loc The source location description, the insertion location
/// is not used.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Updated CanonicalLoopInfo.
CanonicalLoopInfo *createStaticWorkshareLoop(const LocationDescription &Loc,
CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
/// \param Loc The source location description, the insertion location
/// is not used.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the loop.
InsertPointTy createDynamicWorkshareLoop(const LocationDescription &Loc,
CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param Loc The source location description, the insertion location
/// is not used.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Updated CanonicalLoopInfo.
CanonicalLoopInfo *createWorkshareLoop(const LocationDescription &Loc,
CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p/ Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
/// referred to as the floor loops, and the loops i2 and j2 are the tile loops.
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimensions.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
///}
/// Return the insertion point used by the underlying IRBuilder.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); } // snapshot of the Builder's current insertion point
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
  // Move the IRBuilder to the requested insertion point and adopt the
  // location's debug metadata for instructions created afterwards.
  Builder.restoreIP(Loc.IP);
  Builder.SetCurrentDebugLocation(Loc.DL);
  // A null insertion block marks an unset/invalid location; callers use the
  // result to decide whether to proceed with code generation.
  return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr();
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create an enum class for the Reserve2Flags
Value *getOrCreateIdent(Constant *SrcLocStr,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
// Get the type corresponding to __kmpc_impl_lanemask_t from the deviceRTL
Type *getLanemaskType();
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  // Nothing in flight means there is nothing to match against.
  if (FinalizationStack.empty())
    return false;
  // Only the innermost (most recent) finalization entry is relevant.
  const FinalizationInfo &Last = FinalizationStack.back();
  return Last.IsCancellable && Last.DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
  /// Callback invoked after the region has been extracted into its own
  /// function, receiving that outlined function.
  using PostOutlineCBTy = std::function<void(Function &)>;
  PostOutlineCBTy PostOutlineCB;
  /// First and last basic block of the region to be outlined.
  BasicBlock *EntryBB, *ExitBB;
  /// Collect all blocks in between EntryBB and ExitBB in both the given
  /// vector and set.
  void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
                     SmallVectorImpl<BasicBlock *> &BlockVector);
  /// Return the function that contains the region to be outlined.
  Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) {
  // \p OI is taken by rvalue reference; move it into the container instead
  // of copying (the PostOutlineCB std::function may own captured state).
  OutlineInfos.emplace_back(std::move(OI));
}
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked region.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical region.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, barrier - to ensure all sections are executed
/// before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterVarPtr a pointer to the master variable
/// \param PrivateVarPtr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and copy.in.end block
///
/// \returns The insertion point where copying operation to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// if the directive has a region/body, It will set the insertion
/// point to the body
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// the directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW,
/// or belong to {FADD, FSUB, BAD_BINOP}.
/// Then a `cmpExch` based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X volatile?
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXLHSInRHSPart);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// a struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
  Value *Var = nullptr;    // pointer to the memory location accessed atomically
  bool IsSigned = false;   // whether the value is interpreted as signed
  bool IsVolatile = false; // whether the underlying access is volatile
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If operation
/// is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXLHSInRHSPart);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXLHSInRHSPart);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// Code in the header, condition block, latch and exit block must not have any
/// side-effect. The body block is the single entry point into the loop body,
/// which may contain arbitrary control flow as long as all control paths
/// eventually branch to the latch block.
///
/// Defined outside OpenMPIRBuilder because one cannot forward-declare nested
/// classes.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  /// Whether this object currently represents a loop.
  bool IsValid = false;

  // The control blocks forming the canonical loop structure; see the class
  // comment for the CFG they form. Populated by OpenMPIRBuilder when the
  // loop skeleton is created.
  BasicBlock *Preheader;
  BasicBlock *Header;
  BasicBlock *Cond;
  BasicBlock *Body;
  BasicBlock *Latch;
  BasicBlock *Exit;
  BasicBlock *After;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be execute before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const { return Preheader; }

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const { return Header; }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const { return Cond; }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const { return Body; }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const { return Latch; }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const { return Exit; }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, it having just a single entry edge and being free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const { return After; }

  /// Returns the llvm::Value containing the number of loop iterations. It must
  /// be valid in the preheader and always interpreted as an unsigned integer of
  /// any bit-width.
  Value *getTripCount() const {
    // By construction, the first instruction of Cond compares the induction
    // variable (operand 0) against the trip count (operand 1).
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const { return getIndVar()->getType(); }

  /// Return the insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    // Insert before the preheader's terminator so the branch stays last.
    return {Preheader, std::prev(Preheader->end())};
  }

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    return {Body, Body->begin()};
  }

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    return {After, After->begin()};
  }

  /// Return the function this canonical loop is located in.
  Function *getFunction() const { return Header->getParent(); }

  /// Consistency self-check.
  void assertOK() const;
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
GB_sparse_add_template.c | //------------------------------------------------------------------------------
// GB_sparse_add_template: C=A+B, C<M>=A+B when C is sparse/hypersparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is sparse or hypersparse:
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse bitmap
// sparse sparse sparse full
// sparse sparse bitmap sparse
// sparse sparse bitmap bitmap
// sparse sparse bitmap full
// sparse sparse full sparse
// sparse sparse full bitmap
// sparse sparse full full
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// If all four matrices are sparse/hypersparse, and C<!M>=A+B is being
// computed, then M is passed in as NULL to GB_add_phase*. GB_add_sparsity
// returns apply_mask as false. The methods below do not handle the case when
// C is sparse, M is sparse, and !M is used. All other uses of !M when M
// is sparse result in a bitmap structure for C, and this is handled by
// GB_bitmap_add_template.
// For this case: the mask is done later, so C=A+B is computed here:
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
{
#ifdef GB_DEBUG
if (M == NULL || M_is_bitmap || M_is_full)
{
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
}
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,j)
// phase2: compute C
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < C_ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
int64_t len ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
len = TaskList [taskid].len ;
}
else
{
// a coarse task operates on one or more whole vectors
len = vlen ;
}
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C
//------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
#if defined ( GB_PHASE_1_OF_2 )
int64_t cjnz = 0 ;
#else
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,j)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task.
pC = Cp [k ] ;
pC_end = Cp [k+1] ;
}
int64_t cjnz = pC_end - pC ;
if (cjnz == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,j)
//------------------------------------------------------------------
int64_t pA = -1, pA_end = -1 ;
if (fine_task)
{
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// a subset of the vector A(:,j)
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// A coarse task operates on the entire vector A (:,j)
int64_t kA = (C_to_A == NULL) ? j : C_to_A [k] ;
if (kA >= 0)
{
pA = GBP (Ap, kA, vlen) ;
pA_end = GBP (Ap, kA+1, vlen) ;
}
}
int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
int64_t pA_start = pA ;
bool adense = (ajnz == len) ;
// get the first and last indices in A(:,j) for this vector
int64_t iA_first = -1, iA_last = -1 ;
if (ajnz > 0)
{
iA_first = GBI (Ai, pA, vlen) ;
iA_last = GBI (Ai, pA_end-1, vlen) ;
}
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB = -1, pB_end = -1 ;
if (fine_task)
{
// A fine task operates on Bi,Bx [pB...pB_end-1], which is
// a subset of the vector B(:,j)
pB = TaskList [taskid].pB ;
pB_end = TaskList [taskid].pB_end ;
}
else
{
// A coarse task operates on the entire vector B (:,j)
int64_t kB = (C_to_B == NULL) ? j : C_to_B [k] ;
if (kB >= 0)
{
pB = GBP (Bp, kB, vlen) ;
pB_end = GBP (Bp, kB+1, vlen) ;
}
}
int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
int64_t pB_start = pB ;
bool bdense = (bjnz == len) ;
// get the first and last indices in B(:,j) for this vector
int64_t iB_first = -1, iB_last = -1 ;
if (bjnz > 0)
{
iB_first = GBI (Bi, pB, vlen) ;
iB_last = GBI (Bi, pB_end-1, vlen) ;
}
//------------------------------------------------------------------
// get M(:,j) if M is sparse or hypersparse
//------------------------------------------------------------------
bool sparse_mask_is_easy = false ;
int64_t pM = -1 ;
int64_t pM_end = -1 ;
if (M_is_sparse_or_hyper)
{
if (fine_task)
{
// A fine task operates on Mi,Mx [pM...pM_end-1],
// which is a subset of the vector M(:,j)
pM = TaskList [taskid].pM ;
pM_end = TaskList [taskid].pM_end ;
}
else
{
int64_t kM = -1 ;
if (Ch_is_Mh)
{
// Ch is the same as Mh (a deep copy)
ASSERT (Ch != NULL) ;
ASSERT (M_is_hyper) ;
ASSERT (Ch [k] == M->h [k]) ;
kM = k ;
}
else
{
kM = (C_to_M == NULL) ? j : C_to_M [k] ;
}
if (kM >= 0)
{
pM = GBP (Mp, kM , vlen) ;
pM_end = GBP (Mp, kM+1, vlen) ;
}
}
// The "easy mask" condition requires M to be sparse/hyper
// and structural. A and B cannot be bitmap. Also one of
// the following 3 conditions must hold:
// (1) all entries are present in A(:,j) and B == M
// (2) all entries are present in B(:,j) and A == M
// (3) both A and B are aliased to M
sparse_mask_is_easy =
Mask_struct && // M must be structural
!A_is_bitmap && // A must not be bitmap
!B_is_bitmap && // B must not be bitmap
((adense && B == M) || // one of 3 conditions holds
(bdense && A == M) ||
(A == M && B == M)) ;
// TODO: add the condition above to GB_add_sparsity,
// where adense/bdense are true for the whole matrix
// (adense is true if A is full, or sparse/hypersparse with
// all entries present). The test here is done vector by
// vector, for each A(:,j) and B(:,j). This is a finer grain
// test, as compared to a test for all of A and B.
}
//------------------------------------------------------------------
// C(:,j)<optional mask> = A (:,j) + B (:,j) or subvector
//------------------------------------------------------------------
if (M == NULL)
{
//--------------------------------------------------------------
// M is not present, or !M is sparse but not applied here
//--------------------------------------------------------------
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
// If all four matrices are sparse or hypersparse, and
// Mask_comp is true, the mask M is passed in to this method as
// NULL. C=A+B is computed with no mask, and !M is applied
// later.
// A and B are both sparse or hypersparse, not bitmap or
// full, but individual vectors of A and B might have all
// entries present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
#if defined ( GB_PHASE_1_OF_2 )
if (A_and_B_are_disjoint)
{
// only used by GB_wait, which computes A+T where T is the
// matrix of pending tuples for A. The pattern of pending
// tuples is always disjoint with the pattern of A.
cjnz = ajnz + bjnz ;
}
else
#endif
if (adense && bdense)
{
//----------------------------------------------------------
// Method01: A(:,j) and B(:,j) dense: thus C(:,j) dense
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + p, A_iso) ;
GB_GETB (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + p), aij, bij, i, j) ;
#endif
}
#endif
}
else if (adense)
{
//----------------------------------------------------------
// Method02: A(:,j) dense, B(:,j) sparse: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Bi [pB + p] ;
int64_t ii = i - iA_first ;
ASSERT (Ai [pA + ii] == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + ii, A_iso) ;
GB_GETB (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (bdense)
{
//----------------------------------------------------------
// Method03: A(:,j) sparse, B(:,j) dense: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
int64_t i = p + iB_first ;
Ci [pC + p] = i ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Ai [pA + p] ;
int64_t ii = i - iB_first ;
ASSERT (Bi [pB + ii] == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + p, A_iso) ;
GB_GETB (bij, Bx, pB + ii, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method04: A(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
#endif
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method05: B(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
#endif
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method06: last A(:,j) comes before 1st B(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
pC += ajnz ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
#endif
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method07: last B(:,j) comes before 1st A(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
pC += bjnz ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
#endif
}
#if defined ( GB_PHASE_1_OF_2 )
else if (ajnz > 32 * bjnz)
{
//----------------------------------------------------------
// Method08: A(:,j) is much denser than B(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
// find i in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
if (found) cjnz-- ;
}
}
else if (bjnz > 32 * ajnz)
{
//----------------------------------------------------------
// Method09: B(:,j) is much denser than A(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
// find i in B(:,j)
int64_t pright = pB_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
if (found) cjnz-- ;
}
}
#endif
else
{
//----------------------------------------------------------
// Method10: A(:,j) and B(:,j) about the same sparsity
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
// C (iA,j) = A (iA,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
#endif
pA++ ;
}
else if (iA > iB)
{
// C (iB,j) = B (iB,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
#endif
pB++ ;
}
else
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA, A_iso) ;
GB_GETB (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
#endif
pA++ ;
pB++ ;
}
#if defined ( GB_PHASE_2_OF_2 )
pC++ ;
#else
cjnz++ ;
#endif
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
ajnz = (pA_end - pA) ;
bjnz = (pB_end - pB) ;
ASSERT (ajnz == 0 || bjnz == 0) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz += ajnz + bjnz ;
#else
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC + p), Ax, pA + p, A_iso) ;
}
#endif
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC + p), Bx, pB + p, B_iso) ;
}
#endif
ASSERT (pC + ajnz + bjnz == pC_end) ;
#endif
}
}
else if (sparse_mask_is_easy)
{
//--------------------------------------------------------------
// special case: M is present and very easy to use
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse full
// sparse sparse full sparse
// sparse sparse full full
// A and B are sparse, hypersparse or full, not bitmap.
ASSERT (!A_is_bitmap) ;
ASSERT (!B_is_bitmap) ;
ASSERT (Mask_struct) ;
int64_t mjnz = pM_end - pM ; // nnz (M (:,j))
#if defined ( GB_PHASE_1_OF_2 )
// M is structural, and sparse or hypersparse, so every entry
// in the mask is guaranteed to appear in A+B. The symbolic
// count is thus trivial.
cjnz = mjnz ;
#else
// copy the pattern into C (:,j)
int64_t pC_start = pC ;
int64_t pM_start = pM ;
memcpy (Ci + pC, Mi + pM, mjnz * sizeof (int64_t)) ;
int64_t pA_offset = pA_start - iA_first ;
int64_t pB_offset = pB_start - iB_first ;
if (adense && B == M)
{
//----------------------------------------------------------
// Method11: A dense, B == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pA_offset + i, vlen) == i) ;
ASSERT (GBI (Bi, pM, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA_offset + i, A_iso) ;
GB_GETB (bij, Bx, pM, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else if (bdense && A == M)
{
//----------------------------------------------------------
// Method12: B dense, A == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pM, vlen) == i) ;
ASSERT (GBI (Bi, pB_offset + i, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pM, A_iso) ;
GB_GETB (bij, Bx, pB_offset + i, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else // (A == M) && (B == M)
{
//----------------------------------------------------------
// Method13: A == M == B: all three matrices the same
//----------------------------------------------------------
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
#if GB_OP_IS_SECOND
GB_GETB (t, Bx, pM, B_iso) ;
#else
GB_GETA (t, Ax, pM, A_iso) ;
#endif
GB_BINOP (GB_CX (pC), t, t, Mi [pM], j) ;
}
#endif
}
#endif
}
else if (M_is_sparse_or_hyper)
{
//--------------------------------------------------------------
// Method14: C and M are sparse or hypersparse
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (*)
// sparse sparse sparse bitmap (*)
// sparse sparse sparse full (*)
// sparse sparse bitmap sparse (*)
// sparse sparse bitmap bitmap (+)
// sparse sparse bitmap full (+)
// sparse sparse full sparse (*)
// sparse sparse full bitmap (+)
// sparse sparse full full (+)
// (*) This method is efficient except when either A or B are
// sparse, and when M is sparse but with many entries. When M
// is sparse and either A or B are sparse, the method is
// designed to be very efficient when M is very sparse compared
// with A and/or B. It traverses all entries in the sparse M,
// and (for sparse A or B) does a binary search for entries in
// A or B. In that case, if M has many entries, the mask M
// should be ignored, and C=A+B should be computed without any
// mask. The test for when to use M here should ignore A or B
// if they are bitmap or full.
// (+) TODO: if C and M are sparse/hyper, and A and B are
// both bitmap/full, then use GB_emult_03_template instead,
// but with (Ab [p] || Bb [p]) instead of (Ab [p] && Bb [p]).
// A and B can have any sparsity pattern (hypersparse,
// sparse, bitmap, or full).
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// get M(i,j) for A(i,j) + B (i,j)
//----------------------------------------------------------
int64_t i = Mi [pM] ;
bool mij = GB_mcast (Mx, pM, msize) ;
if (!mij) continue ;
//----------------------------------------------------------
// get A(i,j)
//----------------------------------------------------------
bool afound ;
if (adense)
{
// A is dense, bitmap, or full; use quick lookup
pA = pA_start + (i - iA_first) ;
afound = GBB (Ab, pA) ;
}
else if (A == M)
{
// A is aliased to M
pA = pM ;
afound = true ;
}
else
{
// A is sparse; use binary search. This is slow unless
// M is very sparse compared with A.
int64_t apright = pA_end - 1 ;
GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
}
ASSERT (GB_IMPLIES (afound, GBI (Ai, pA, vlen) == i)) ;
//----------------------------------------------------------
// get B(i,j)
//----------------------------------------------------------
bool bfound ;
if (bdense)
{
// B is dense; use quick lookup
pB = pB_start + (i - iB_first) ;
bfound = GBB (Bb, pB) ;
}
else if (B == M)
{
// B is aliased to M
pB = pM ;
bfound = true ;
}
else
{
// B is sparse; use binary search. This is slow unless
// M is very sparse compared with B.
int64_t bpright = pB_end - 1 ;
GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
}
ASSERT (GB_IMPLIES (bfound, GBI (Bi, pB, vlen) == i)) ;
//----------------------------------------------------------
// C(i,j) = A(i,j) + B(i,j)
//----------------------------------------------------------
if (afound && bfound)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA, A_iso) ;
GB_GETB (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
else if (afound)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
else if (bfound)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else
{
//--------------------------------------------------------------
// M is bitmap or full, for either C<M>=A+B or C<!M>=A+B
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// This method is very efficient for any mask, and should
// always be used if M is bitmap or full, even if the mask must
// also be applied later in GB_mask or GB_accum_mask.
// Exploiting the mask here adds no extra search time, and it
// reduces the size of C on output.
// GB_GET_MIJ: get M(i,j) where M is bitmap or full
#undef GB_GET_MIJ
#define GB_GET_MIJ(i) \
int64_t pM = pM_start + i ; \
bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; \
if (Mask_comp) mij = !mij ;
// A and B are sparse or hypersparse, not bitmap or full,
// but individual vectors of A and B might have all entries
// present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
int64_t pM_start = j * vlen ;
if (adense && bdense)
{
//----------------------------------------------------------
// Method15: A(:,j) and B(:,j) dense, M bitmap/full
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = p + iA_first ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA + p, A_iso) ;
GB_GETB (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
}
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method16: A(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method17: B(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method18:last A(:,j) before 1st B(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method19:last B(:,j) before 1st A(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
}
else
{
//----------------------------------------------------------
// Method20: merge A(:,j) and B(:,j), M bitmap/full
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
GB_GET_MIJ (iA) ;
if (mij)
{
// C (iA,j) = A (iA,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
pA++ ;
}
else if (iA > iB)
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
pB++ ;
}
else
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_GETA (aij, Ax, pA, A_iso) ;
GB_GETB (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
pC++ ;
#endif
}
pA++ ;
pB++ ;
}
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t iA = Ai [pA] ;
GB_GET_MIJ (iA) ;
if (mij)
{
// C (iA,j) = A (iA,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t iB = Bi [pB] ;
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
#endif
pC++ ;
#endif
}
}
}
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
#endif
}
}
}
|
badloop.c | /*
DESCRIPTION:
Parallelizing an inner loop with dependences
Backward dependency
for (i=0; i<VSIZE-1; i++) {
V[i] = ( V[i] + V[i+1] ) / 2;
}
Method: Try direct parallelization with PARALLEL FOR
Result: NON-CORRECT!!!
*/
#include<stdio.h>
#include<stdlib.h>
#include<omp.h>
#define NUM_THREADS 4
#define VSIZE 100
// Demonstrates WHY naive parallelization of a loop with a backward
// dependence V[i] = V[i] + V[i+1] is incorrect.  U[] holds the serial
// reference result, V[] is updated by a parallel loop, and any mismatch
// between the two is reported.  The race below is INTENTIONAL -- this
// program exists to show the wrong result.
// NOTE(review): `void main` is nonstandard C (should be `int main`); kept
// as-is since this is a teaching example.
void main()
{
// V is the vector updated in parallel; U is the serial reference copy.
// Both have VSIZE+1 elements so reading index i+1 is valid for i < VSIZE.
int V[ VSIZE+1 ],i,U[VSIZE+1];
for (i=0; i<VSIZE+1; i++) {
V[i]= i ;
U[i] = i ;
}
// Serial reference: each U[i] reads the ORIGINAL U[i+1] only because the
// loop runs in increasing index order on a single thread.
for(i=0;i<VSIZE;i++) U[i] = U[i] + U[i+1];
printf("Before Parallel Loop\n");
/* for( i = 0 ; i<10;i++) printf("V[%d]=%d ",i,V[i]); */
printf("\n");
omp_set_num_threads(NUM_THREADS);
// DATA RACE (intentional): iteration i reads V[i+1], which iteration i+1
// writes.  With schedule(static) each thread gets a contiguous chunk, so
// the last iteration of one thread's chunk may read a V[i+1] that the
// next thread has already overwritten -- result is NON-CORRECT.
#pragma omp parallel for default(none) shared(V) private(i) schedule(static)
for (i=0; i<VSIZE; i++) {
V[i] = V[i] + V[i+1];
}
printf("After Parallel Loop\n");
/* for( i = 0 ; i<10;i++) printf("V[%d]=%d ",i,V[i]); */
printf("\n");
// Compare against the serial reference; any mismatch exposes the race.
for( i=0;i<VSIZE;i++)
if ( V[i] != U[i] ) printf("Found V[%d] error\n", i);
}
|
revcomp.c | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas
// This string/character array is used to convert characters into the
// complementing character.
#define COMPLEMENT_LOOKUP \
" "\
/*ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz */\
" TVGH CD M KN YSAABW R TVGH CD M KN YSAABW R"
// This controls the size of reads from the input and is also used as the
// initial sequence_Capacity.
#define READ_SIZE 16384
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
static intnative_t next_Sequence_Number_To_Output=1;
// Reverse-complement one FASTA sequence in place and write it to stdout in
// the order the sequences were read from the input.
//
// sequence         heap buffer holding one '>' header line plus sequence
//                  data; ownership is taken (freed here).
// sequence_Size    number of valid bytes in the buffer.
// sequence_Number  1-based read order; output is serialized on the global
//                  next_Sequence_Number_To_Output counter.
static void process_Sequence(char * sequence, const intnative_t sequence_Size
  , const intnative_t sequence_Number){
    // Trim the buffer down to the bytes actually used.  If the (shrinking)
    // realloc fails, keep the original, larger allocation -- it is still
    // valid and holds the same data.
    char * const shrunk=realloc(sequence, sequence_Size);
    if(shrunk) sequence=shrunk;

    // front_Pos: first byte after the '>' header line; back_Pos: last byte
    // of the buffer.  Both are then advanced past any line feeds.  The
    // bounds check comes FIRST in each loop so we never dereference one
    // byte past the data when everything after the header is line feeds
    // (the original tested *front_Pos before checking front_Pos<=back_Pos).
    char * front_Pos=sequence, * back_Pos=sequence+sequence_Size-1;
    while(*front_Pos++!='\n');  // assumes the buffer starts with a header
                                // line ending in '\n' -- TODO confirm caller
    while(front_Pos<=back_Pos && *front_Pos=='\n') front_Pos++;
    while(front_Pos<=back_Pos && *back_Pos=='\n') back_Pos--;

    // Reverse the sequence while complementing each base via the lookup
    // table; line feeds are skipped on both sides so the line structure of
    // the buffer is preserved.
    while(front_Pos<=back_Pos){
        const char temp=COMPLEMENT_LOOKUP[(unsigned char)*front_Pos];
        *front_Pos=COMPLEMENT_LOOKUP[(unsigned char)*back_Pos];
        *back_Pos=temp;
        // Skip over line feeds (the non-'\n' header bytes at the front of
        // the buffer stop back_Pos from running off the start).
        while(*++front_Pos=='\n');
        while(*--back_Pos=='\n');
    }

    // Busy-wait until it is this sequence's turn, so output order matches
    // input order even though sequences are processed by concurrent tasks.
    #pragma omp flush(next_Sequence_Number_To_Output)
    while(sequence_Number!=next_Sequence_Number_To_Output){
        #pragma omp flush(next_Sequence_Number_To_Output)
    }
    fwrite(sequence, 1, sequence_Size, stdout);
    // Only the single task whose turn it is reaches this point, so the
    // unsynchronized increment is ordered by the wait loop above.
    next_Sequence_Number_To_Output++;
    #pragma omp flush(next_Sequence_Number_To_Output)
    // Free the memory for the altered sequence.
    free(sequence);
}
// Read FASTA data from stdin, split it into sequences at each '>' header,
// and hand each completed sequence to an OpenMP task for reverse
// complementing; process_Sequence serializes the output back into input
// order.  The single thread running this region does all the reading while
// other threads consume tasks.
int main(){
#pragma omp parallel
{
#pragma omp single
{
// Allocate memory for the initial sequence (assuming there is one).
// NOTE(review): malloc/realloc returns are unchecked throughout; an
// allocation failure would crash -- acceptable for a benchmark, but worth
// confirming for other uses.
intnative_t sequence_Capacity=READ_SIZE, sequence_Size=0
, sequence_Number=1;
char * sequence=malloc(sequence_Capacity);
// Read in sequence data until we reach the end of the file or
// encounter an error.
for(intnative_t bytes_Read; (bytes_Read
=fread(&sequence[sequence_Size], 1, READ_SIZE, stdin)); ){
// Search the read in chunk of data for a '>' to see if any
// sequences are being started.  Several sequences may start within one
// chunk, hence the inner loop.
for(char * sequence_Start; (sequence_Start
=memchr(&sequence[sequence_Size], '>', bytes_Read)); ){
// Update the sequence_Size to reflect any data before the
// '>' that was read in.
const intnative_t number_Of_Preceding_Bytes
=sequence_Start-&sequence[sequence_Size];
sequence_Size+=number_Of_Preceding_Bytes;
// If there is any data for the current sequence, then
// start processing it.  (sequence_Size==0 means this '>' is the very
// first header, so there is no prior sequence to flush.)
if(sequence_Size){
// Allocate memory for a new sequence and copy the '>'
// & any data following it to the new sequence.  The copied tail is at
// most bytes_Read <= READ_SIZE bytes, so READ_SIZE capacity suffices.
char * const new_Sequence=malloc(READ_SIZE);
memcpy(new_Sequence, sequence_Start
, bytes_Read-number_Of_Preceding_Bytes);
// Process the current sequence and have another thread
// do the work if OpenMP is enabled and there is more
// than one CPU core.  firstprivate captures the current pointer/size
// BEFORE they are reassigned below, so the task owns the old buffer.
#pragma omp task\
firstprivate(sequence, sequence_Size, sequence_Number)
{
process_Sequence(sequence, sequence_Size
, sequence_Number);
}
// Update variables to reflect the new sequence.
sequence=new_Sequence;
sequence_Capacity=READ_SIZE;
sequence_Size=0;
sequence_Number++;
}
// Update sequence_Size and bytes_Read to reflect the read
// in '>' and any data that preceded it.  bytes_Read now counts only the
// unscanned tail of the chunk for the next memchr.
sequence_Size++;
bytes_Read-=number_Of_Preceding_Bytes+1;
}
// Update sequence_Size to reflect the bytes that were read in.
sequence_Size+=bytes_Read;
// If there potentially isn't enough free space for all the data
// from the next read, then double the capacity of the sequence.
if(sequence_Size>sequence_Capacity-READ_SIZE)
sequence=realloc(sequence, sequence_Capacity*=2);
}
// If there is any data for a last sequence, process it, otherwise
// just free the sequence memory.  This final call runs on the reading
// thread itself, after all input is consumed.
if(sequence_Size)
process_Sequence(sequence, sequence_Size, sequence_Number);
else
free(sequence);
}
}
return 0;
}
/*
NOTES:
64-bit Ubuntu quad core
gcc (Ubuntu 6.3.0-12ubuntu2) 6.3.0 20170406
Fri, 14 Apr 2017 17:30:03 GMT
MAKE:
/usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -funroll-loops -fopenmp revcomp.gcc-6.c -o revcomp.gcc-6.gcc_run
rm revcomp.gcc-6.c
0.13s to complete and log all make actions
COMMAND LINE:
./revcomp.gcc-6.gcc_run 0 < revcomp-input25000000.txt
(TRUNCATED) PROGRAM OUTPUT:
>ONE Homo sapiens alu
GCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGA
TCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCC
GAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAG
AGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGC
CCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTT
GAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTG
CAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGAT
TACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCA
CCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTC
CCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTC
GCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTC
CCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGC
CACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAG
GCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGG
ATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCA
GGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCG
ATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGC
TAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAAC
TCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGA
GCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAG
TGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTC
AGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATT
TTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGG
TGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCG
GCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCG
GCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTA
GCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACG
GGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCC
TCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGAC
GGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACC
TCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAG
GCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATG
TTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAA
GTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCT
GTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGG
TTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCA
CGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGG
TCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTAC
AGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTG
GAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCT
CCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATT
TTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTG
ACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCAC
CGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCG
CGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCT
CCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAG
TAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATC
CGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTT
TTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCA
CTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGG
GATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTT
TCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGC
CTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGT
CTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGC
CTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCG
CGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGC
CAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCT
GGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGC
CCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAA
GCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCC
GGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCG
AACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCG
TGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTG
CAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGC
CTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGT
ATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTC
AGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGC
CCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATC
TCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGA
GTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAG
ACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCC
GCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGA
GACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCA
ACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTA
CAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACC
ATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCC
AAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGC
TCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCC
GGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCA
CCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGC
TGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGAT
TACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGG
CTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGAT
TCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTA
ATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTC
CTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGC
CACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTG
GCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAG
CCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTT
TAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTG
ATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGC
CTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGC
TCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGC
TGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGG
GTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTC
GGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGG
AGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTC
CGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGC
GCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTT
GGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGT
GCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGT
CGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTT
CAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACG
CCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTC
TCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAG
GCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGA
GTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCC
TGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTT
TGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGAC
CTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCG
CGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCG
ATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCC
CGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTA
GAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCG
CCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTT
TGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACT
GCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGA
TTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTC
ACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCT
CCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCT
CGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCT
CCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCG
CCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCA
GGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGG
GATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCC
AGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGC
GATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGG
CTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAA
CTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTG
AGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCA
GTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCT
CAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTAT
TTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAG
GTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCC
GGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTC
GGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGT
AGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGAC
GGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGC
CTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGA
CGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAAC
CTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACA
GGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCAT
GTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAA
AGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTC
TGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGG
GTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACC
ACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTG
GTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTA
CAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCT
GGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTC
TCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAAT
TTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCT
GACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCA
CCGCGCCCGGCCTTTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGC
GCGATCTCGGCTCACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCC
TCCCGAGTAGCTGGGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTA
GTAGAGACGGGGTTTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGAT
CCGCCCGCCTCGGCCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCT
TTTTGAGACGGAGTCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTC
ACTGCAACCTCCGCCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTG
GGATTACAGGCGCGCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGT
TTCACCATGTTGGCCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGG
CCTCCCAAAGTGCTGGGATTACAGGCGTGAGCCACCGCGCCCGGCCTTTTTGAGACGGAG
TCTCGCTCTGTCGCCCAGGCTGGAGTGCAGTGGCGCGATCTCGGCTCACTGCAACCTCCG
CCTCCCGGGTTCAAGCGATTCTCCTGCCTCAGCCTCCCGAGTAGCTGGGATTACAGGCGC
GCGCCACCACGCCCGGCTAATTTTTGTATTTTTAGTAGAGACGGGGTTTCACCATGTTGG
CCAGGCTGGTCTCGAACTCCTGACCTCAGGTGATCCGCCCGCCTCGGCCTCCCAAAGTGC
TGGGATTACAGGCGTGAGCCACCGCGCCCGG
*/
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Pairs a destination node with an edge weight; together with a source
// node it represents one weighted edge.
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;
  WeightT_ w;
  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
  // Order by node id first, with weight as the tie-breaker.
  bool operator< (const NodeWeight& rhs) const {
    if (v != rhs.v)
      return v < rhs.v;
    return w < rhs.w;
  }
  // Weight deliberately ignored so duplicate edges compare equal.
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }
  // Weight deliberately ignored so a self edge matches the plain node id.
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }
  // Implicit conversion lets a NodeWeight stand in for its node id.
  operator NodeID_() {
    return v;
  }
};
// Streams a NodeWeight as "<node> <weight>".
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  return os << nw.v << " " << nw.w;
}
// Reads a NodeWeight as whitespace-separated "<node> <weight>".
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  return is >> nw.v >> nw.w;
}
// Syntactic sugar for an edge: a plain source/destination pair.
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
  SrcT u;
  DstT v;
  EdgePair() {}
  EdgePair(SrcT u, DstT v) : u(u), v(v) {}
  // Lexicographic order: source first, destination as tie-breaker.
  bool operator< (const EdgePair& rhs) const {
    if (u != rhs.u)
      return u < rhs.u;
    return v < rhs.v;
  }
  bool operator== (const EdgePair& rhs) const {
    return (u == rhs.u) && (v == rhs.v);
  }
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
// Compressed-Sparse-Row graph container. Owns its index/neighbor arrays
// and frees them on destruction; movable but not copyable. For an
// undirected graph the in_* pointers alias the out_* pointers.
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
  // Used for *non-negative* offsets within a neighborhood
  typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;

  // Used to access neighbors of vertex, basically sugar for iterators
  class Neighborhood {
    NodeID_ n_;             // vertex whose neighbors are exposed
    DestID_** g_index_;     // borrowed CSR index array (not owned)
    OffsetT start_offset_;  // offset of the first neighbor exposed
   public:
    // start_offset is clamped to the degree so begin() cannot pass end().
    Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) :
        n_(n), g_index_(g_index), start_offset_(0) {
      OffsetT max_offset = end() - begin();
      start_offset_ = std::min(start_offset, max_offset);
    }
    typedef DestID_* iterator;
    iterator begin() { return g_index_[n_] + start_offset_; }
    iterator end() { return g_index_[n_+1]; }
  };

  // Frees owned arrays. For undirected graphs directed_ is false and the
  // in_* pointers (which alias out_*) are deliberately not freed again.
  void ReleaseResources() {
    if (out_index_ != nullptr)
      delete[] out_index_;
    if (out_neighbors_ != nullptr)
      delete[] out_neighbors_;
    if (directed_) {
      if (in_index_ != nullptr)
        delete[] in_index_;
      if (in_neighbors_ != nullptr)
        delete[] in_neighbors_;
    }
  }

 public:
  // Empty graph; sizes stay -1 until a real graph is move-assigned in.
  CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
    out_index_(nullptr), out_neighbors_(nullptr),
    in_index_(nullptr), in_neighbors_(nullptr) {}

  // Undirected graph: takes ownership of index/neighs; in_* alias out_*.
  // Each undirected edge is stored in both directions, hence the /2.
  CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
    directed_(false), num_nodes_(num_nodes),
    out_index_(index), out_neighbors_(neighs),
    in_index_(index), in_neighbors_(neighs) {
      num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
    }

  // Directed graph: takes ownership of separate out- and in-CSR arrays.
  CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
        DestID_** in_index, DestID_* in_neighs) :
    directed_(true), num_nodes_(num_nodes),
    out_index_(out_index), out_neighbors_(out_neighs),
    in_index_(in_index), in_neighbors_(in_neighs) {
      num_edges_ = out_index_[num_nodes_] - out_index_[0];
    }

  // Move constructor: steals other's arrays and leaves it empty so the
  // arrays are not freed twice.
  CSRGraph(CSRGraph&& other) : directed_(other.directed_),
    num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
    out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
    in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
  }

  ~CSRGraph() {
    ReleaseResources();
  }

  // Move assignment: releases current storage, then steals other's.
  CSRGraph& operator=(CSRGraph&& other) {
    if (this != &other) {
      ReleaseResources();
      directed_ = other.directed_;
      num_edges_ = other.num_edges_;
      num_nodes_ = other.num_nodes_;
      out_index_ = other.out_index_;
      out_neighbors_ = other.out_neighbors_;
      in_index_ = other.in_index_;
      in_neighbors_ = other.in_neighbors_;
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
    }
    return *this;
  }

  bool directed() const {
    return directed_;
  }

  int64_t num_nodes() const {
    return num_nodes_;
  }

  // For undirected graphs each edge is counted once here.
  int64_t num_edges() const {
    return num_edges_;
  }

  // Number of stored directed edges; an undirected edge counts twice.
  int64_t num_edges_directed() const {
    return directed_ ? num_edges_ : 2*num_edges_;
  }

  int64_t out_degree(NodeID_ v) const {
    return out_index_[v+1] - out_index_[v];
  }

  int64_t in_degree(NodeID_ v) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return in_index_[v+1] - in_index_[v];
  }

  // Iterable view of n's outgoing neighbors, optionally skipping the
  // first start_offset of them (clamped to the degree).
  Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    return Neighborhood(n, out_index_, start_offset);
  }

  Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return Neighborhood(n, in_index_, start_offset);
  }

  // Note: the reported average degree uses integer division.
  void PrintStats() const {
    std::cout << "Graph has " << num_nodes_ << " nodes and "
              << num_edges_ << " ";
    if (!directed_)
      std::cout << "un";
    std::cout << "directed edges for degree: ";
    std::cout << num_edges_/num_nodes_ << std::endl;
  }

  // Dumps every adjacency list to stdout (debugging aid).
  void PrintTopology() const {
    for (NodeID_ i=0; i < num_nodes_; i++) {
      std::cout << i << ": ";
      for (DestID_ j : out_neigh(i)) {
        std::cout << j << " ";
      }
      std::cout << std::endl;
    }
  }

  // Builds a CSR index array: index[n] = neighs + offsets[n].
  static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
    NodeID_ length = offsets.size();
    DestID_** index = new DestID_*[length];
    #pragma omp parallel for
    for (NodeID_ n=0; n < length; n++)
      index[n] = neighs + offsets[n];
    return index;
  }

  // Per-vertex offsets into the neighbor array (out-graph by default,
  // in-graph when in_graph is true).
  pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
    pvector<SGOffset> offsets(num_nodes_+1);
    for (NodeID_ n=0; n < num_nodes_+1; n++)
      if (in_graph)
        offsets[n] = in_index_[n] - in_index_[0];
      else
        offsets[n] = out_index_[n] - out_index_[0];
    return offsets;
  }

  // Range over all vertex ids [0, num_nodes()).
  Range<NodeID_> vertices() const {
    return Range<NodeID_>(num_nodes());
  }

 private:
  bool directed_;           // true iff the in_* arrays are distinct
  int64_t num_nodes_;
  int64_t num_edges_;       // undirected edges counted once
  DestID_** out_index_;     // owned
  DestID_* out_neighbors_;  // owned
  DestID_** in_index_;      // owned iff directed_, else aliases out_index_
  DestID_* in_neighbors_;   // owned iff directed_, else aliases out_neighbors_
};
#endif // GRAPH_H_
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute *result = *x - *y for `struct timeval' values.
 *
 * NOTE: *y is normalized in place as scratch space (the classic glibc
 * manual idiom); callers must not rely on y afterwards.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Push any excess whole seconds of microseconds back into tv_sec. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the tiled order-1 3D 7-point stencil.
 *
 * Usage: prog Nx Ny Nz Nt
 *   Nx/Ny/Nz: interior problem size (one halo layer added on each side)
 *   Nt:       number of time steps
 *
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt were read from argv only when present and otherwise used
 *    uninitialized (undefined behavior); missing arguments now abort with
 *    a usage message.
 *  - Initialization previously started at index 1 and never touched A[1],
 *    so the stencil read uninitialized halo planes and an uninitialized
 *    second time buffer; both buffers are now initialized over the full
 *    domain.
 *  - Removed unused locals (t, lb, ub, lb2, ub2).
 */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;

  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;  /* +2: one halo layer on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  /* Two time levels (ping-pong buffers), each Nz x Ny x Nx. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize both time levels over the FULL domain, including the halo
   * planes at index 0/N-1 and the second buffer A[1], so the stencil
   * never reads uninitialized memory. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // Tiled, OpenMP-parallel stencil sweep (CLooG-generated loop nest).
    // Addition: 6 && Multiplication: 2 per point.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-511,512)),ceild(4*t2-Nz-1020,1024)),ceild(24*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t2+Nx,1024),floord(Nt+Nx-4,1024)),floord(2*t1+Nx+1,1024)),floord(24*t3+Nx+20,1024)),floord(4*t1-4*t2+Nz+Nx-1,1024));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),1024*t4+1022),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(1024*t4,t5+1);
                    ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
GB_unaryop__ainv_fp32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_uint8
// op(A') function: GB_tran__ainv_fp32_uint8
// C type: float
// A type: uint8_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = -((float) Ax [p]) for all anz entries; GB_CAST_OP
// (defined above) expands to the uint8->float cast followed by AINV.
GrB_Info GB_unop__ainv_fp32_uint8
(
    float *restrict Cx,          // output array of type float, length anz
    const uint8_t *restrict Ax,  // input array of type uint8_t, length anz
    int64_t anz,                 // number of entries to process
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent and uniform cost: static schedule is ideal
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the included template, which uses the GB_*
// macros defined above for the type-specific cast and negation.
GrB_Info GB_tran__ainv_fp32_uint8
(
    GrB_Matrix C,                     // output matrix
    const GrB_Matrix A,               // input matrix, consumed transposed
    int64_t **Rowcounts,              // workspace -- semantics defined in GB_unaryop_transpose.c
    GBI_single_iterator Iter,         // iterator state over A -- see template
    const int64_t *restrict A_slice,  // slice boundaries for parallel work
    int naslice                       // number of slices
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mpinpb.h |
//---------------------------------------------------------------------
//---------------------------------------------------------------------
#ifndef __MPINPB_H
#define __MPINPB_H

// Shared process-bookkeeping globals. Exactly one translation unit
// defines G_MAIN to own the storage; all others see extern declarations.
// NOTE(review): names suggest MPI rank/communicator bookkeeping
// (node = this rank, root = root rank) -- confirm against the callers.
#ifdef G_MAIN
int node, no_nodes, total_nodes, root;
int active;
#else
extern int node, no_nodes, total_nodes, root;
extern int active;
#endif

// Under OpenMP each thread keeps a private copy of these globals.
#ifdef _OPENMP
#pragma omp threadprivate (node, no_nodes, total_nodes, root, active)
#endif

#endif
|
openmp2.c | #include <stdio.h>
#include <omp.h>
// COMPILE WIH -fopenmp flag
#define SIZE 20000
// Demonstrates a dynamically scheduled OpenMP worksharing loop:
// fills a[] and b[] sequentially, then computes c[i] = a[i] + b[i]
// in parallel with chunks of `chunk` iterations handed out on demand.
int main() {
    int a[SIZE];
    int b[SIZE];
    int c[SIZE];
    int chunk = 100;  // iterations handed to a thread at a time

    // Sequential setup of the input vectors.
    for(int i = 0; i < SIZE; i++) {
        a[i] = i;
        b[i] = 2 * i;
    }

    // The loop index is declared inside the for statement, so it is
    // automatically private to each thread; the original's outer `int j`
    // and `private(j)` clause covered a dead variable and are removed.
    #pragma omp parallel shared(a, b, c, chunk)
    {
        #pragma omp for schedule(dynamic, chunk) nowait
        for(int j = 0; j < SIZE; j++) {
            c[j] = a[j] + b[j];
        }
    }
    return 0;
}
LocalFilterScore.h | /*
* LocalFilterScore.h
*
* Created on: 20.11.2014
* Author: Michael Hamann, Gerd Lindner
*/
#ifndef LOCALLOGSCORE_H
#define LOCALLOGSCORE_H
#include "../edgescores/EdgeScore.h"
#include "../auxiliary/Parallel.h"
namespace NetworKit {
/**
* Local filtering edge scoring. Edges with high score are more important.
*
* Edges are ranked locally, the top d^e (logarithmic, default) or 1+e*(d-1) edges (non-logarithmic) are kept.
* For equal attribute values, neighbors of low degree are preferred.
*/
template<typename InType>
class LocalFilterScore : public EdgeScore<double> {

public:
	/**
	 * Initialize the local edge filtering score.
	 *
	 * @param G The graph whose edges shall be scored.
	 * @param attribute The input attribute according to which the edges shall be filtered locally.
	 * @param logarithmic If the score shall be logarithmic in the rank (then d^e edges are kept). Linear otherwise.
	 */
	LocalFilterScore(const Graph& G, const std::vector< InType > &attribute, bool logarithmic = true) :
		EdgeScore<double>(G), attribute(attribute), logarithmic(logarithmic) {}

	/**
	 * Execute the algorithm: for each edge, compute the minimum
	 * sparsification exponent e such that the edge is contained in the
	 * sparse graph. Requires indexed edges.
	 */
	virtual void run() {
		if (!G.hasEdgeIds()) {
			throw std::runtime_error("edges have not been indexed - call indexEdges first");
		}

		// Atomic because every edge is visited from both endpoints in parallel.
		std::vector<std::atomic<double>> sparsificationExp(G.upperEdgeIdBound());

		G.balancedParallelForNodes([&](node i) {
			count d = G.degree(i);

			/*
			 * The top d^e edges (sorted by similarity in descending order)
			 * are to be kept in the sparse graph.
			 */
			std::vector<edgeid> neighbors;
			neighbors.reserve(d);
			G.forNeighborsOf(i, [&](node _i, node j, edgeid eid) {
				neighbors.emplace_back(eid);
			});
			// Best (largest) attribute value first.
			std::sort(neighbors.begin(), neighbors.end(), [&](const edgeid& e1, const edgeid& e2) {
				return attribute[e1] > attribute[e2];
			});

			count rank = 0;
			count numSame = 1;  // length of the current run of equal values
			InType oldValue = std::numeric_limits<InType>::lowest();
			for (edgeid eid : neighbors) {
				if (attribute[eid] != oldValue) {
					rank += numSame;
					numSame = 1;
					// Bug fix: remember the value of the new run. Previously
					// oldValue was never updated, so ties were never detected
					// and equal attribute values never shared a rank.
					oldValue = attribute[eid];
				} else {
					++numSame;
				}

				double e = 1.0;  // a degree-1 node always keeps its only edge
				if (d > 1) {
					if (logarithmic) {
						e = 1.0 - log(rank) / log(d);  // keep top d^e edges
					} else {
						e = 1.0 - (rank-1) * 1.0 / (d - 1); // Keep top 1 + e * (d-1) edges
					}
				}

				// An edge survives if either endpoint requires it: keep the max.
				Aux::Parallel::atomic_max(sparsificationExp[eid], e);
			}
		});

		scoreData.clear();
		scoreData.resize(G.upperEdgeIdBound());

		// Copy the atomic exponents into the plain result vector.
		#pragma omp parallel for
		for (index i = 0; i < scoreData.size(); ++i) {
			scoreData[i] = sparsificationExp[i];
		}

		hasRun = true;
	}

	// Per-edge results are exposed only via scores().
	virtual double score(node u, node v) {
		throw std::runtime_error("Not implemented: Use scores() instead.");
	}

	virtual double score(edgeid eid) {
		throw std::runtime_error("Not implemented: Use scores() instead.");
	}

private:
	const std::vector<InType>& attribute;  // not owned; must outlive this object
	bool logarithmic;
};
} // namespace NetworKit
#endif // LOCALLOGSCORE_H
|
opencl_office2010_fmt_plug.c | /* MS Office 2010 cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>
*
* OpenCL support by magnum.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Copyright (c) 2012, magnum and it is hereby released to the general public
* under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_office2010;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_office2010);
#else
#include "sha.h"
#include "aes.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "common-opencl.h"
#include "office_common.h"
#include "config.h"
#define PLAINTEXT_LENGTH 51
#define UNICODE_LENGTH 104 /* In octets, including 0x80 */
#define FORMAT_LABEL "office2010-opencl"
#define FORMAT_NAME "MS Office 2010"
#define OCL_ALGORITHM_NAME "SHA1 OpenCL"
#define CPU_ALGORITHM_NAME " AES"
#define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (100,000 iterations)"
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_LENGTH 16
#define SALT_SIZE sizeof(*cur_salt)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/*
 * Self-test vectors. Field layout appears to be
 * $office$*2010*<iterations>*<keybits>*<saltlen>*<salt>*<encryptedVerifier>*<encryptedVerifierHash>
 * (consistent with ITERATIONS=100000 and SALT_LENGTH=16 above and the
 * verifier check in crypt_all(); exact parsing lives in office_common.c).
 */
static struct fmt_tests tests[] = {
/* 2010-Default_myhovercraftisfullofeels_.docx */
{"$office$*2010*100000*128*16*213aefcafd9f9188e78c1936cbb05a44*d5fc7691292ab6daf7903b9a8f8c8441*46bfac7fb87cd43bd0ab54ebc21c120df5fab7e6f11375e79ee044e663641d5e", "myhovercraftisfullofeels"},
/* 2010-Default_myhovercraftisfullofeels_.dotx */
{"$office$*2010*100000*128*16*0907ec6ecf82ede273b7ee87e44f4ce5*d156501661638cfa3abdb7fdae05555e*4e4b64e12b23f44d9a8e2e00196e582b2da70e5e1ab4784384ad631000a5097a", "myhovercraftisfullofeels"},
/* 2010-Default_myhovercraftisfullofeels_.xlsb */
{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*00780eeb9605c7e27227c5619e91dc21*90aaf0ea5ccc508e699de7d62c310f94b6798ae77632be0fc1a0dc71600dac38", "myhovercraftisfullofeels"},
/* 2010-Default_myhovercraftisfullofeels_.xlsx */
{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*ef51883a775075f30d2207e87987e6a3*a867f87ea955d15d8cb08dc8980c04bf564f8af060ab61bf7fa3543853e0d11a", "myhovercraftisfullofeels"},
{NULL}
};
static ms_office_custom_salt *cur_salt;
static int *cracked, any_cracked;
static char *saved_key; /* Password encoded in UCS-2 */
static int *saved_len; /* UCS-2 password length, in octets */
static char *saved_salt;
static unsigned char *key; /* Output key from kernel */
static int new_keys, spincount;
static struct fmt_main *self;
static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_pwhash, cl_key, cl_spincount;
static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_key;
static cl_kernel GenerateSHA1pwhash, Generate2010key;
#define HASH_LOOPS 500 /* Lower figure gives less X hogging */
#define ITERATIONS 100000
#define STEP 0
#define SEED 128
static const char * warn[] = {
"xfer: ", ", xfer: ", ", init: ", ", loop: ", ", final: ", ", xfer: "
};
static int split_events[] = { 3, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/*
 * Report the largest work-group size usable by ALL three kernels of this
 * format, i.e. the minimum of the per-kernel limits.
 */
static size_t get_task_max_work_group_size()
{
	size_t limit = autotune_get_task_max_work_group_size(FALSE, 0, GenerateSHA1pwhash);
	size_t loop_limit = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	size_t final_limit = autotune_get_task_max_work_group_size(FALSE, 0, Generate2010key);

	if (loop_limit < limit)
		limit = loop_limit;
	if (final_limit < limit)
		limit = final_limit;
	return limit;
}
/*
 * Allocate all OpenCL buffers for the given global work size and bind them
 * as kernel arguments.  Host-visible staging buffers are pinned
 * (CL_MEM_ALLOC_HOST_PTR) and mapped, so saved_key/saved_len/saved_salt/key
 * alias page-locked transfer memory.  Everything here is undone by
 * release_clobj().  NOTE(review): the blocking (CL_TRUE) maps double as
 * synchronization for the allocations.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
int i;
/* Benchmark password length in octets (UCS-2 = 2 bytes per character). */
int bench_len = strlen(tests[0].plaintext) * 2;
/* Scale to scalar work-items when the vectorized kernel is used. */
gws *= ocl_v_width;
/* Candidate passwords: pinned staging buffer + device copy, zeroed. */
pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, UNICODE_LENGTH * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
saved_key = (char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key");
memset(saved_key, 0, UNICODE_LENGTH * gws);
/* Per-candidate password lengths (in octets), pre-seeded for benchmarking. */
pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(cl_int) * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
saved_len = (int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len");
for (i = 0; i < gws; i++)
saved_len[i] = bench_len;
/* Salt: one shared 16-byte buffer (not per work-item). */
pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, SALT_LENGTH, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, SALT_LENGTH, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
saved_salt = (char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, SALT_LENGTH, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt");
memset(saved_salt, 0, SALT_LENGTH);
/* Device-only intermediate state: 6 uints per scalar work-item. */
cl_pwhash = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 6 * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device state buffer");
/* Kernel output: 32 bytes of derived key material per candidate. */
pinned_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 32 * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
cl_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 32 * gws, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error allocating device memory");
key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 32 * gws, 0, NULL, NULL, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping page-locked memory verifier keys");
memset(key, 0, 32 * gws);
/* Spin count lives in host memory (CL_MEM_USE_HOST_PTR wraps &spincount). */
cl_spincount = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(cl_int), &spincount, &ret_code);
HANDLE_CLERROR(ret_code, "Error mapping spincount");
/* Wire buffers to the three kernels; arguments are fixed for the run. */
HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0");
HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1");
HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 2, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 2");
HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 3, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 3");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
HANDLE_CLERROR(clSetKernelArg(Generate2010key, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
HANDLE_CLERROR(clSetKernelArg(Generate2010key, 1, sizeof(cl_mem), (void*)&cl_key), "Error setting argument 1");
HANDLE_CLERROR(clSetKernelArg(Generate2010key, 2, sizeof(cl_mem), (void*)&cl_spincount), "Error setting argument 2");
/* Host-side crack flags; also serves as "objects exist" marker for release_clobj(). */
cracked = mem_alloc(sizeof(*cracked) * gws);
}
/*
 * Undo create_clobj(): unmap the pinned staging buffers, drain the queue so
 * the unmaps complete, then release every cl_mem and the host result array.
 * The non-NULL 'cracked' pointer doubles as the "objects were created" flag,
 * making this safe to call more than once.
 */
static void release_clobj(void)
{
if (cracked) {
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_key, key, 0, NULL, NULL), "Error Unmapping key");
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key");
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len");
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt");
/* Unmaps are asynchronous; wait before releasing the objects. */
HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");
HANDLE_CLERROR(clReleaseMemObject(cl_spincount), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release GPU buffer");
HANDLE_CLERROR(clReleaseMemObject(cl_pwhash), "Release GPU buffer");
MEM_FREE(cracked);
}
}
/*
 * Format teardown: release buffers, kernels and the program, but only if
 * reset() actually built them (tracked by the autotuned counter).
 */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(GenerateSHA1pwhash), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(Generate2010key), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/*
 * Zero all host-side candidate material: the UCS-2 password buffer and the
 * per-candidate length array, across the full (scalar) global work size.
 */
static void clear_keys(void)
{
	const size_t scalar_gws = global_work_size * ocl_v_width;

	memset(saved_key, 0, UNICODE_LENGTH * scalar_gws);
	memset(saved_len, 0, sizeof(*saved_len) * scalar_gws);
}
/*
 * Store one candidate password at 'index' as UCS-2 (UTF-16LE) and append the
 * 0x80 byte the SHA-1 kernel expects after the message.  saved_len ends up
 * in octets (UTF-16 code units << 1).
 */
static void set_key(char *key, int index)
{
UTF16 *utfkey = (UTF16*)&saved_key[index * UNICODE_LENGTH];
/* Convert key to UTF-16LE.  NOTE(review): a negative return presumably
   signals invalid/truncated input, in which case the length of what was
   actually stored is recovered with strlen16() — confirm in unicode.h. */
saved_len[index] = enc_to_utf16(utfkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
if (saved_len[index] < 0)
saved_len[index] = strlen16(utfkey);
/* Prepare for GPU: place the padding start byte right after the password. */
utfkey[saved_len[index]] = 0x80;
/* Length in octets from here on. */
saved_len[index] <<= 1;
new_keys = 1;
}
/*
 * Install the current salt: copy the 16 raw salt bytes into the mapped
 * staging buffer and queue non-blocking transfers of both the salt and the
 * spin count (iteration count) to the device.
 */
static void set_salt(void *salt)
{
	cur_salt = (ms_office_custom_salt *)salt;
	memcpy(saved_salt, cur_salt->osalt, SALT_LENGTH);
	spincount = cur_salt->spinCount;
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, SALT_LENGTH, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt");
	/* Was a hard-coded 4; sizeof(spincount) keeps the transfer size in sync
	   with the cl_spincount buffer created with sizeof(cl_int). */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_spincount, CL_FALSE, 0, sizeof(spincount), &spincount, 0, NULL, NULL), "failed in clEnqueueWriteBuffer spincount");
}
/*
 * One-time format initialization: prepare the device, query its preferred
 * int vector width, and advertise a "Nx" algorithm name when the vectorized
 * kernel will be used.  With a UTF-8 target encoding the reported plaintext
 * limit is widened (up to 3 octets per character, clamped to 125).
 */
static void init(struct fmt_main *_self)
{
/* static: params.algorithm_name keeps pointing at this after init returns. */
static char valgo[32] = "";
self = _self;
opencl_prepare_dev(gpu_id);
if ((ocl_v_width = opencl_get_vector_width(gpu_id,
sizeof(cl_int))) > 1) {
/* Run vectorized kernel */
snprintf(valgo, sizeof(valgo),
OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
self->params.algorithm_name = valgo;
}
if (options.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
/*
 * (Re)build the OpenCL program and kernels and run auto-tuning, once.
 * Buffer creation is delegated to create_clobj via opencl_init_auto_setup;
 * 'autotuned' (maintained by the autotune code) guards against re-entry.
 */
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u -DV_WIDTH=%u",
HASH_LOOPS,
UNICODE_LENGTH,
ocl_v_width);
opencl_init("$JOHN/kernels/office2010_kernel.cl", gpu_id,
build_opts);
// create kernel to execute
GenerateSHA1pwhash = clCreateKernel(program[gpu_id], "GenerateSHA1pwhash", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
crypt_kernel = clCreateKernel(program[gpu_id], "HashLoop", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
Generate2010key = clCreateKernel(program[gpu_id], "Generate2010key", &ret_code);
HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn,
3, self, create_clobj, release_clobj,
2 * ocl_v_width * UNICODE_LENGTH, 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, ITERATIONS + 4, 0,
(cpu(device_info[gpu_id]) ?
1000000000 : 10000000000ULL));
}
}
/*
 * Main crack routine for one salt/batch:
 *   1. upload new candidate passwords (only when they changed),
 *   2. GPU: initial SHA-1 of salt+password, then spincount iterations of the
 *      hash loop split into HASH_LOOPS-sized kernel launches (keeps the GPU
 *      watchdog/X responsive), then derive the 32 bytes of key material,
 *   3. CPU: AES-decrypt verifier + verifier hash and compare SHA-1 of the
 *      former against the latter (OpenMP-parallel).
 * Results land in cracked[]/any_cracked for the cmp_* callbacks.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t gws, scalar_gws;
size_t *lws = local_work_size ? &local_work_size : NULL;
gws = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
scalar_gws = gws * ocl_v_width;
/* Reset crack flags from the previous batch. */
if (any_cracked) {
memset(cracked, 0, count * sizeof(*cracked));
any_cracked = 0;
}
/* Only transfer keys when set_key() ran since the last crypt. */
if (ocl_autotune_running || new_keys) {
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key");
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len");
new_keys = 0;
}
/* Initial hash runs on scalar work-items; the loop/final kernels are vectorized. */
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, lws, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel");
/* NOTE(review): any remainder of spincount % HASH_LOOPS is not iterated
   here — presumably spincount (100000) is always a multiple of HASH_LOOPS
   (500); verify against the kernel if that ever changes. */
for (index = 0; index < (ocl_autotune_running ? 1 : spincount / HASH_LOOPS); index++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel");
BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
opencl_process_event();
}
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2010key, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel");
// read back verifier keys (blocking read doubles as final synchronization)
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 32 * scalar_gws, key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back");
if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
SHA_CTX ctx;
unsigned char hash[20];
unsigned char decryptedVerifierHashInputBytes[16];
unsigned char decryptedVerifierHashBytes[32];
/* First 16 key bytes decrypt the verifier, next 16 the verifier hash. */
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &key[32*index], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &key[32*index+16], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
SHA1_Init(&ctx);
SHA1_Update(&ctx, decryptedVerifierHashInputBytes, 16);
SHA1_Final(hash, &ctx);
if (!memcmp(hash, decryptedVerifierHashBytes, 20))
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
return count;
}
/* Batch-level early-out: non-zero iff crypt_all() cracked anything. */
static int cmp_all(void *binary, int count)
{
	return any_cracked ? 1 : 0;
}
/* Per-candidate result, precomputed by crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index] ? 1 : 0;
}
/* Full verification already happened in crypt_all(); nothing left to check. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/*
 * Reconstruct the candidate at 'index' from the stored UCS-2 buffer and
 * convert it back to the configured target encoding.
 */
static char *get_key(int index)
{
	UTF16 buf[PLAINTEXT_LENGTH + 1];
	const int len_octets = saved_len[index];

	memcpy(buf, &saved_key[index * UNICODE_LENGTH], len_octets);
	buf[len_octets >> 1] = 0;	/* terminate at code-unit boundary */
	return (char*)utf16_to_enc(buf);
}
/*
 * John the Ripper format descriptor: static parameters first, then the
 * method table wired to the functions above.  valid/salt/cost helpers are
 * shared with the CPU format via office_common.
 */
struct fmt_main fmt_opencl_office2010 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
{
/* Tunable cost name(s) reported to the user. */
"iteration count",
},
tests
}, {
init,
done,
reset,
fmt_default_prepare,
ms_office_common_valid_2010,
fmt_default_split,
fmt_default_binary,
ms_office_common_get_salt,
{
/* Tunable cost extraction, parallel to the names above. */
ms_office_common_iteration_count,
},
fmt_default_source,
{
/* No binary: comparisons happen via cracked[] (see cmp_*). */
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
/* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);
/// Counted pointer of TwoStepVPStrategy
//typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// Constructor from a prepared solver-settings object; all strategy
/// configuration is delegated to InitializeStrategy().
TwoStepVPStrategy(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
InitializeStrategy(rSolverConfig);
}
/// Constructor from explicit linear solvers.  Builds two auxiliary
/// Gauss-Seidel linear strategies — one for the momentum (velocity) step
/// with an elimination builder, one for the continuity (pressure) step with
/// a block builder — both sharing a static incremental-update scheme.
/// @param rModelPart            model part holding the fluid mesh
/// @param pVelocityLinearSolver linear solver for the momentum system
/// @param pPressureLinearSolver linear solver for the continuity system
/// @param ReformDofSet          rebuild DOF set each step (strategy Clear())
/// @param VelTol / PresTol      convergence tolerances (stored in members)
/// @param MaxPressureIterations outer predictor-corrector iteration cap
/// @param TimeOrder             1 (Backward Euler) or 2 (BDF2)
/// @param DomainSize            spatial dimension (2 or 3)
TwoStepVPStrategy(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor. Empty body: members release their resources in their own
/// destructors.
virtual ~TwoStepVPStrategy() {}
/// Sanity checks run from the constructor: base-class checks, required
/// variable registration (DELTA_TIME, BDF_COEFFICIENTS), buffer size
/// consistent with the time order (3 for BDF2, 2 for Backward Euler), and
/// per-element checks.  Returns 0 on success, the first non-zero element
/// error code otherwise; throws on configuration errors.
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
if (BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", "");
ModelPart &rModelPart = BaseType::GetModelPart();
if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize());
if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize());
// const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
// {
//     ierr = itEl->Check(rCurrentProcessInfo);
//     if (ierr != 0)
//     break;
// }
const auto &r_current_process_info = rModelPart.GetProcessInfo();
// Stop at the first element reporting a problem.
for (const auto &r_element : rModelPart.Elements())
{
ierr = r_element.Check(r_current_process_info);
if (ierr != 0)
{
break;
}
}
/* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */
/* { */
/*     ierr = itCond->Check(rCurrentProcessInfo); */
/*     if (ierr != 0) break; */
/* } */
return ierr;
KRATOS_CATCH("");
}
/// One coupled velocity-pressure solution step.  Iterates momentum and
/// continuity solves up to an adaptive iteration cap (tripled for the first
/// 10 time steps, doubled for the next 10 or after a time-step change),
/// moving the mesh after each momentum solve.  Convergence requires both
/// sub-problems converged AND at least 4 iterations (it > 2).
/// @return true if the nonlinear loop converged.
bool SolveSolutionStep() override
{
ModelPart &rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int stepsWithChangedDt = rCurrentProcessInfo[STEPS_WITH_CHANGED_DT];
bool converged = false;
unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("\nSolution with two_step_vp_strategy at t=") << currentTime << "s" << std::endl;
// Grant extra iterations right after a time-step change ...
if ((timeIntervalChanged == true && currentTime > 10 * timeInterval) || stepsWithChangedDt > 0)
{
maxNonLinearIterations *= 2;
}
// ... and during the start-up phase of the simulation.
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
double pressureNorm = 0;
double velocityNorm = 0;
this->SetBlockedFlag();
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
// 1. Momentum (velocity) solve, then move the mesh accordingly.
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
// 2. Continuity (pressure) solve, skipped when the momentum step
//    requested a fixed/reduced time step.
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
}
// Update stresses on the last iteration or once converged.
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2))
{
this->UpdateStressStrain();
}
if ((continuityConverged && momentumConverged) && it > 2)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
converged = true;
KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
// NOTE(review): this reports only when BOTH sub-problems failed on the
// last iteration; a single failing field goes unreported by design(?).
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
return converged;
}
/// Intentionally empty: no per-step finalization is needed by this strategy.
void FinalizeSolutionStep() override
{
}
/// Intentionally empty: per-step setup happens inside SolveSolutionStep().
void InitializeSolutionStep() override
{
}
/// Update nodal displacements/porosity and move the mesh to the new
/// configuration.  NOTE(review): both parameters are currently unused — the
/// boundary-normals recomputation that consumed them is commented out.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
/* BoundaryNormalsCalculationUtilities BoundaryComputation; */
/* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */
KRATOS_CATCH("");
}
/// Classify elements before the solve (OpenMP-parallel over elements):
///  - BLOCKED: in 3D, a tetrahedron whose faces are nearly coplanar (any
///    pair of face normals with |cos| above a threshold that tightens as the
///    number of isolated nodes drops: 0.99 / 0.995 / 0.999), provided all of
///    its nodes are on the free surface;
///  - ISOLATED: all nodes on the free surface, none rigid, and at least
///    numNodes-1 nodes belonging to only this element.
/// ISOLATED overrides BLOCKED.
void SetBlockedFlag()
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
unsigned int numNodes = itElem->GetGeometry().size();
std::vector<array_1d<double, 3>> nodesCoordinates;
nodesCoordinates.resize(numNodes);
(itElem)->Set(BLOCKED, false);
(itElem)->Set(ISOLATED, false);
// Count node categories and gather coordinates for the face-normal test.
unsigned int freeSurfaceNodes = 0;
unsigned int freeSurfaceRigidNodes = 0;
unsigned int rigidNodes = 0;
unsigned int isolatedNodes = 0;
for (unsigned int i = 0; i < numNodes; i++)
{
if (itElem->GetGeometry()[i].Is(FREE_SURFACE))
{
freeSurfaceNodes++;
if (itElem->GetGeometry()[i].Is(RIGID))
{
freeSurfaceRigidNodes++;
}
}
else if (itElem->GetGeometry()[i].Is(RIGID))
{
rigidNodes++;
}
nodesCoordinates[i] = itElem->GetGeometry()[i].Coordinates();
// A node attached to a single element counts as isolated.
ElementWeakPtrVectorType &neighb_elems = itElem->GetGeometry()[i].GetValue(NEIGHBOUR_ELEMENTS);
if (neighb_elems.size() == 1)
{
isolatedNodes++;
}
}
// if (dimension == 3 && (freeSurfaceNodes == numNodes || (freeSurfaceNodes + rigidNodes) == numNodes))
if (dimension == 3)
{
// Normals (cross products) of the four tetrahedron faces; (a,b,c)
// are the components of each face-plane normal.
// NOTE(review): a zero-length normal (degenerate face) would make the
// cosAngle divisions below produce inf/NaN — presumably prevented
// upstream by the mesher; verify.
double a1 = 0; //slope x for plane on the first triangular face of the tetrahedra (nodes A,B,C)
double b1 = 0; //slope y for plane on the first triangular face of the tetrahedra (nodes A,B,C)
double c1 = 0; //slope z for plane on the first triangular face of the tetrahedra (nodes A,B,C)
a1 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[2][2] - nodesCoordinates[0][2]) - (nodesCoordinates[2][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b1 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[2][0] - nodesCoordinates[0][0]) - (nodesCoordinates[2][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c1 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[2][1] - nodesCoordinates[0][1]) - (nodesCoordinates[2][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a2 = 0; //slope x for plane on the second triangular face of the tetrahedra (nodes A,B,D)
double b2 = 0; //slope y for plane on the second triangular face of the tetrahedra (nodes A,B,D)
double c2 = 0; //slope z for plane on the second triangular face of the tetrahedra (nodes A,B,D)
a2 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[3][2] - nodesCoordinates[0][2]) - (nodesCoordinates[3][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b2 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[3][0] - nodesCoordinates[0][0]) - (nodesCoordinates[3][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c2 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[3][1] - nodesCoordinates[0][1]) - (nodesCoordinates[3][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a3 = 0; //slope x for plane on the third triangular face of the tetrahedra (nodes B,C,D)
double b3 = 0; //slope y for plane on the third triangular face of the tetrahedra (nodes B,C,D)
double c3 = 0; //slope z for plane on the third triangular face of the tetrahedra (nodes B,C,D)
a3 = (nodesCoordinates[1][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[1][2] - nodesCoordinates[2][2]);
b3 = (nodesCoordinates[1][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[1][0] - nodesCoordinates[2][0]);
c3 = (nodesCoordinates[1][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[1][1] - nodesCoordinates[2][1]);
double a4 = 0; //slope x for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
double b4 = 0; //slope y for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
double c4 = 0; //slope z for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
a4 = (nodesCoordinates[0][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[0][2] - nodesCoordinates[2][2]);
b4 = (nodesCoordinates[0][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[0][0] - nodesCoordinates[2][0]);
c4 = (nodesCoordinates[0][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[0][1] - nodesCoordinates[2][1]);
// Cosine of the angle between each pair of face normals.
double cosAngle12 = (a1 * a2 + b1 * b2 + c1 * c2) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle13 = (a1 * a3 + b1 * b3 + c1 * c3) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
double cosAngle14 = (a1 * a4 + b1 * b4 + c1 * c4) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)));
double cosAngle23 = (a3 * a2 + b3 * b2 + c3 * c2) / (sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle24 = (a4 * a2 + b4 * b2 + c4 * c2) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle34 = (a4 * a3 + b4 * b3 + c4 * c3) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
// Threshold relaxes with more isolated nodes (flatter slivers caught earlier).
if ((fabs(cosAngle12) > 0.99 || fabs(cosAngle13) > 0.99 || fabs(cosAngle14) > 0.99 || fabs(cosAngle23) > 0.99 || fabs(cosAngle24) > 0.99 || fabs(cosAngle34) > 0.99) && (freeSurfaceNodes == numNodes) && isolatedNodes > 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.995 || fabs(cosAngle13) > 0.995 || fabs(cosAngle14) > 0.995 || fabs(cosAngle23) > 0.995 || fabs(cosAngle24) > 0.995 || fabs(cosAngle34) > 0.995) && (freeSurfaceNodes == numNodes) && isolatedNodes == 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999) && (freeSurfaceNodes == numNodes))
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
// else if (fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999)
// {
//     (itElem)->Set(BLOCKED, true);
//     // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
// }
}
// A fully free-surface, non-rigid element whose nodes are (almost) all
// single-element nodes is considered detached; ISOLATED wins over BLOCKED.
if (freeSurfaceNodes == numNodes && rigidNodes == 0 && isolatedNodes >= (numNodes - 1))
{
(itElem)->Set(ISOLATED, true);
(itElem)->Set(BLOCKED, false);
}
}
}
KRATOS_CATCH("");
}
// Deactivates "sliver" elements, i.e. simplices whose measure (area in 2D,
// volume in 3D) is below 0.1% of the mean elemental volume of the model part.
// All other simplex elements are explicitly re-activated.
// NOTE(review): assumes the model part has at least one element
// (ElementsBegin() is dereferenced unconditionally) — confirm callers.
void UnactiveSliverElements()
{
  KRATOS_TRY;
  ModelPart &rModelPart = BaseType::GetModelPart();
  const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
  MesherUtilities MesherUtils;
  double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
  // Threshold: mean elemental volume / 1000.
  double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
#pragma omp parallel
  {
    ModelPart::ElementIterator ElemBegin;
    ModelPart::ElementIterator ElemEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
    for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
    {
      unsigned int numNodes = itElem->GetGeometry().size();
      // Only simplex elements (triangle in 2D, tetrahedron in 3D).
      if (numNodes == (dimension + 1))
      {
        // Declared per-iteration: it was previously a single variable shared
        // across all OpenMP threads, which is a data race.
        double ElementalVolume = 0;
        if (dimension == 2)
        {
          ElementalVolume = (itElem)->GetGeometry().Area();
        }
        else if (dimension == 3)
        {
          ElementalVolume = (itElem)->GetGeometry().Volume();
        }
        if (ElementalVolume < CriticalVolume)
        {
          // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
          (itElem)->Set(ACTIVE, false);
        }
        else
        {
          (itElem)->Set(ACTIVE, true);
        }
      }
    }
  }
  KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
// Updates all nodal time-derivative variables for the current step:
// accelerations from the BDF velocity history, then the pressure time
// derivatives (PRESSURE_VELOCITY, PRESSURE_ACCELERATION) by in-place finite
// differences. Node handling depends on flags:
//   - regular fluid nodes (not ISOLATED, and not RIGID unless SOLID): BDF update;
//   - RIGID (non-solid) nodes: accelerations zeroed;
//   - ISOLATED nodes: pressure history wiped and, if VOLUME_ACCELERATION is
//     available, acceleration/velocity driven by it (free-fall behavior).
virtual void CalculateTemporalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
// Rigid, non-solid node: no acceleration history is kept.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated node: clear the whole pressure history.
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
// Isolated node moves under the body force only (explicit Euler step).
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if (timeStep == 1)
{
// First step: no previous pressure exists, zero the derivative history.
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
// Order matters: the old PRESSURE_VELOCITY is folded into the
// acceleration BEFORE being overwritten with the new difference.
// NOTE(review): the net result is
//   PRESSURE_ACCELERATION = (oldPV - newPV) / dt,
// i.e. the NEGATIVE of the forward difference used in
// CalculatePressureAcceleration() — confirm the sign is intended.
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
// Recovers the current acceleration from the BDF velocity history:
//   a_{n+1} = -c1 * (v_{n+1} - v_n) - a_n,  with c1 = BDFcoeffs[1].
// Uses noalias() to avoid a ublas temporary on assignment.
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
                                const array_1d<double, 3> &CurrentVelocity,
                                array_1d<double, 3> &PreviousAcceleration,
                                const array_1d<double, 3> &PreviousVelocity,
                                Vector &BDFcoeffs)
{
  const double velocityCoeff = BDFcoeffs[1];
  noalias(CurrentAcceleration) = -velocityCoeff * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
}
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
// Lets every element initialize its solution-step (strain/stress) state in
// parallel, then refreshes the nodal temporal variables (accelerations and
// pressure time derivatives).
void UpdateStressStrain()
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
  {
    ModelPart::ElementIterator ElemBegin;
    ModelPart::ElementIterator ElemEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
    for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
    {
      itElem->InitializeSolutionStep(rCurrentProcessInfo);
    }
  }
  this->CalculateTemporalVariables();
}
/// Releases the internal storage of both sub-strategies (momentum and pressure).
void Clear() override
{
  mpMomentumStrategy->Clear();
  mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
/// Sets the verbosity of this strategy; the child momentum and pressure
/// strategies are kept one level quieter (never below zero).
void SetEchoLevel(int Level) override
{
  BaseType::SetEchoLevel(Level);
  const int StrategyLevel = (Level > 0) ? (Level - 1) : 0;
  mpMomentumStrategy->SetEchoLevel(StrategyLevel);
  mpPressureStrategy->SetEchoLevel(StrategyLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string.
std::string Info() const override
{
  return std::string("TwoStepVPStrategy");
}
/// Print information about this object.
/// Print information about this object (the strategy's class name).
void PrintInfo(std::ostream &rOStream) const override
{
  rOStream << "TwoStepVPStrategy";
}
/// Print object's data.
/// Print object's data.
// Intentionally empty: this strategy exposes no extra printable state.
void PrintData(std::ostream &rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
// Fills BDF_COEFFICIENTS in the process info for first- or second-order
// backward differentiation, accounting for a variable time-step size.
// For mTimeOrder == 2 the coefficients reduce to {3/2dt, -4/2dt, 1/2dt} when
// the step size is constant; for mTimeOrder == 1 they are {1/dt, -1/dt}.
// Any other time order leaves the process info untouched.
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
  KRATOS_TRY;
  if (mTimeOrder == 2)
  {
    // Variable-step BDF2: Rho is the ratio of the previous to current dt.
    const double Dt = rCurrentProcessInfo[DELTA_TIME];
    const double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
    const double Rho = OldDt / Dt;
    const double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
    Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
    BDFcoeffs.resize(3, false);
    BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        // step n+1
    BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); // step n
    BDFcoeffs[2] = TimeCoeff;                                  // step n-1
  }
  else if (mTimeOrder == 1)
  {
    // Backward Euler.
    const double Dt = rCurrentProcessInfo[DELTA_TIME];
    const double TimeCoeff = 1.0 / Dt;
    Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
    BDFcoeffs.resize(2, false);
    BDFcoeffs[0] = TimeCoeff;  // step n+1
    BDFcoeffs[1] = -TimeCoeff; // step n
  }
  KRATOS_CATCH("");
}
// Performs one nonlinear iteration of the momentum (fractional-step velocity)
// equation. On iteration 0 the momentum strategy is initialized and the
// reference velocity norm is computed; the relative error NormDv/velocityNorm
// drives the convergence checks. On the last allowed iteration the stricter
// FixTimeStepMomentum() check runs (it may roll fields back and flag a reduced
// time step via fixedTimeStep). Returns true when converged.
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedMomentum = false;
double NormDv = 0;
fixedTimeStep = false;
// build momentum system and solve for fractional step velocity increment
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
if (it == 0)
{
mpMomentumStrategy->InitializeSolutionStep();
}
// Solve() returns the norm of the velocity correction.
NormDv = mpMomentumStrategy->Solve();
if (BaseType::GetEchoLevel() > 1 && Rank == 0)
std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
if (it == 0)
{
// Reference norm; ComputeVelocityNorm() guarantees a non-zero value.
velocityNorm = this->ComputeVelocityNorm();
}
double DvErrorNorm = NormDv / velocityNorm;
// Convergence is only checked after this many iterations (exclusive).
unsigned int iterationForCheck = 2;
// Check convergence
if (it == maxIt - 1)
{
KRATOS_INFO("Iteration") << it << " Final Velocity error: " << DvErrorNorm << std::endl;
ConvergedMomentum = this->FixTimeStepMomentum(DvErrorNorm, fixedTimeStep);
}
else if (it > iterationForCheck)
{
KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
ConvergedMomentum = this->CheckMomentumConvergence(DvErrorNorm, fixedTimeStep);
}
else
{
// Early iterations: only report the error, no convergence decision yet.
KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
}
if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
return ConvergedMomentum;
}
// Performs one iteration of the pressure (continuity) equation. On iteration 0
// the pressure strategy is initialized and the reference pressure norm NormP
// is computed; the relative error NormDp/NormP drives the convergence checks.
// On the last allowed iteration FixTimeStepContinuity() runs (it may roll
// fields back and request a reduced time step). Returns true when converged.
// NOTE(review): fixedTimeStep is local here, so a time-step reduction decided
// by FixTimeStepContinuity() is not propagated to the caller — confirm intent.
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedContinuity = false;
bool fixedTimeStep = false;
double NormDp = 0;
// 2. Pressure solution
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
if (it == 0)
{
mpPressureStrategy->InitializeSolutionStep();
}
// Solve() returns the norm of the pressure correction.
NormDp = mpPressureStrategy->Solve();
if (BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "The norm of pressure is: " << NormDp << std::endl;
if (it == 0)
{
// Reference norm; ComputePressureNorm() guarantees a non-zero value.
NormP = this->ComputePressureNorm();
}
double DpErrorNorm = NormDp / (NormP);
// Check convergence
if (it == (maxIt - 1))
{
KRATOS_INFO("Iteration") << it << " Final Pressure error: " << DpErrorNorm << std::endl;
ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm, fixedTimeStep);
}
else
{
KRATOS_INFO("Iteration") << it << " Pressure error: " << DpErrorNorm << std::endl;
ConvergedContinuity = this->CheckContinuityConvergence(DpErrorNorm, fixedTimeStep);
}
if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
return ConvergedContinuity;
}
// Computes L2-norm errors of the velocity components, pressure and deviatoric
// stresses against a hard-coded 2D analytical solution and appends one
// "time<TAB>error" row per call to a set of text files.
// NOTE(review): barycenter weights (1/3 per node) and geometry.Area() assume
// 3-node triangles — confirm for other element types.
// NOTE(review): sumErrorL2Velocity is never accumulated, so
// errorL2VelocityFile.txt always receives 0; kept for output compatibility.
void ComputeErrorL2Norm()
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
  const double currentTime = rCurrentProcessInfo[TIME];
  long double sumErrorL2Velocity = 0;
  long double sumErrorL2VelocityX = 0;
  long double sumErrorL2VelocityY = 0;
  long double sumErrorL2Pressure = 0;
  long double sumErrorL2TauXX = 0;
  long double sumErrorL2TauYY = 0;
  long double sumErrorL2TauXY = 0;
  // The accumulators are written by every thread: a reduction clause is
  // required (they were previously plain shared variables -> data race).
#pragma omp parallel reduction(+ : sumErrorL2Velocity, sumErrorL2VelocityX, sumErrorL2VelocityY, sumErrorL2Pressure, sumErrorL2TauXX, sumErrorL2TauYY, sumErrorL2TauXY)
  {
    ModelPart::ElementIterator ElemBegin;
    ModelPart::ElementIterator ElemEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
    for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
    {
      Element::GeometryType &geometry = itElem->GetGeometry();
      // Interpolate the nodal fields at the single Gauss point.
      Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
      const Vector &N = row(NContainer, 0);
      const unsigned int NumNodes = geometry.size();
      double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
      double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
      double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
      for (unsigned int i = 1; i < NumNodes; i++)
      {
        elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
        elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
        elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
      }
      // Element barycenter.
      long double bariPosX = 0;
      long double bariPosY = 0;
      for (unsigned int i = 0; i < geometry.size(); i++)
      {
        bariPosX += geometry(i)->X() / 3.0;
        bariPosY += geometry(i)->Y() / 3.0;
      }
      const long double posX = bariPosX;
      const long double posY = bariPosY;
      // Analytical fields evaluated at the barycenter.
      const long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
      const long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
      const long double expectedPressure = -posX * (1.0 - posX);
      const long double eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
      const long double eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
      const long double eleErrorL2Pressure = elementalPressure - expectedPressure;
      sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area();
      sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area();
      sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area();
      // Deviatoric stresses are currently hard-coded to zero (the elemental
      // values are not retrieved) — TODO confirm before trusting tau errors.
      const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
      const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
      const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
      const long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
      const long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
      const long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
      const long double nodalErrorTauXX = tauXX - expectedTauXX;
      const long double nodalErrorTauYY = tauYY - expectedTauYY;
      const long double nodalErrorTauXY = tauXY - expectedTauXY;
      sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area();
      sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area();
      sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area();
    }
  }
  // Append "time<TAB>value" to the given file (same files/order as before).
  auto appendError = [currentTime](const std::string &fileName, long double value) {
    std::ofstream file(fileName, std::ios::app);
    file << currentTime << "\t" << value << "\n";
  };
  appendError("errorL2VelocityFile.txt", sqrt(sumErrorL2Velocity));
  appendError("errorL2VelocityXFile.txt", sqrt(sumErrorL2VelocityX));
  appendError("errorL2VelocityYFile.txt", sqrt(sumErrorL2VelocityY));
  appendError("errorL2PressureFile.txt", sqrt(sumErrorL2Pressure));
  appendError("errorL2TauXXFile.txt", sqrt(sumErrorL2TauXX));
  appendError("errorL2TauYYFile.txt", sqrt(sumErrorL2TauYY));
  appendError("errorL2TauXYFile.txt", sqrt(sumErrorL2TauXY));
}
// Computes L2-norm errors of the azimuthal velocity and shear stress against
// the analytical circular-Couette solution for flow between two concentric
// cylinders (inner radius r_in rotating at omega, outer radius R_out) and
// appends "time<TAB>errV<TAB>errTau" to errorL2Poiseuille.txt.
// NOTE(review): barycenter weights (1/3 per node) assume 3-node triangles.
void ComputeErrorL2NormCasePoiseuille()
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
  const double currentTime = rCurrentProcessInfo[TIME];
  double sumErrorL2VelocityTheta = 0;
  double sumErrorL2TauTheta = 0;
  // Analytical-solution parameters, hard-coded for this benchmark case.
  const double r_in = 0.2;
  const double R_out = 0.5;
  const double kappa = r_in / R_out;
  const double omega = 0.5;
  const double viscosity = 100.0;
  // The accumulators are written by every thread: a reduction clause is
  // required (they were previously plain shared variables -> data race).
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
  {
    ModelPart::ElementIterator ElemBegin;
    ModelPart::ElementIterator ElemEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
    for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
    {
      Element::GeometryType &geometry = itElem->GetGeometry();
      // Interpolate the nodal velocities at the single Gauss point.
      Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
      const Vector &N = row(NContainer, 0);
      const unsigned int NumNodes = geometry.size();
      double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
      double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
      for (unsigned int i = 1; i < NumNodes; i++)
      {
        elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
        elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
      }
      // Element barycenter and its polar decomposition.
      long double bariPosX = 0;
      long double bariPosY = 0;
      for (unsigned int i = 0; i < geometry.size(); i++)
      {
        bariPosX += geometry(i)->X() / 3.0;
        bariPosY += geometry(i)->Y() / 3.0;
      }
      const long double posX = bariPosX;
      const long double posY = bariPosY;
      const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
      const double cosalfa = posX / rPos;
      const double sinalfa = posY / rPos;
      const double sin2alfa = 2.0 * cosalfa * sinalfa;
      const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
      // Analytical azimuthal velocity and shear stress at radius rPos.
      const double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
      const double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
      const double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
      // Deviatoric stresses are currently hard-coded to zero (the elemental
      // values are not retrieved), so the tau error measures the magnitude of
      // the analytical stress — TODO confirm this is intended.
      const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
      const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
      const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
      const double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
      const double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
      const double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
      sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area();
      sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area();
    }
  }
  const double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
  const double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
  std::ofstream myfileVelocity;
  myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
  myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
  myfileVelocity.close();
}
// Returns the Euclidean norm of the nodal VELOCITY field over the whole model
// part (MPI-summed across ranks). Returns 1.0 for an identically-zero field so
// callers can divide by it safely.
double ComputeVelocityNorm()
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  double NormV = 0.00;
#pragma omp parallel reduction(+ : NormV)
  {
    ModelPart::NodeIterator NodeBegin;
    ModelPart::NodeIterator NodeEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
    for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
    {
      const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
      // (an unused per-node accumulator was removed here)
      for (unsigned int d = 0; d < 3; ++d)
      {
        NormV += Vel[d] * Vel[d];
      }
    }
  }
  BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
  NormV = sqrt(NormV);
  if (NormV == 0.0)
    NormV = 1.00;
  return NormV;
}
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormV = 0.00;
errorNormDv = 0;
#pragma omp parallel reduction(+ \
: NormV)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
double NormVelNode = 0;
for (unsigned int d = 0; d < 3; ++d)
{
NormVelNode += Vel[d] * Vel[d];
NormV += Vel[d] * Vel[d];
}
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
NormV = sqrt(NormV);
if (NormV == 0.0)
NormV = 1.00;
errorNormDv = NormDv / NormV;
if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
{
std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
std::cout << "The norm of velocity is: " << NormV << std::endl;
std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
}
/* else{ */
/* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
/* } */
if (errorNormDv < mVelocityTolerance)
{
return true;
}
else
{
return false;
}
}
// Computes the relative pressure error errorNormDp = NormDp / ||p|| (NormP is
// returned by reference, MPI-summed and clamped to 1 when zero) and checks it
// against mPressureTolerance. Returns true when converged.
bool CheckPressureConvergence(const double NormDp, double &errorNormDp, double &NormP)
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  NormP = 0.00;
  errorNormDp = 0;
#pragma omp parallel reduction(+ : NormP)
  {
    ModelPart::NodeIterator NodeBegin;
    ModelPart::NodeIterator NodeEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
    for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
    {
      const double nodalPressure = itNode->FastGetSolutionStepValue(PRESSURE);
      NormP += nodalPressure * nodalPressure;
    }
  }
  BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
  NormP = sqrt(NormP);
  // Guard against division by zero for an all-zero pressure field.
  if (NormP == 0.0)
    NormP = 1.00;
  errorNormDp = NormDp / (NormP);
  if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
  {
    std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
    std::cout << " The norm of pressure is: " << NormP << std::endl;
    std::cout << " Pressure error: " << errorNormDp << std::endl;
  }
  return errorNormDp < mPressureTolerance;
}
// Returns the Euclidean norm of the nodal PRESSURE field over the whole model
// part (MPI-summed across ranks). Returns 1.0 for an identically-zero field so
// callers can divide by it safely.
double ComputePressureNorm()
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  double NormP = 0.00;
#pragma omp parallel reduction(+ : NormP)
  {
    ModelPart::NodeIterator NodeBegin;
    ModelPart::NodeIterator NodeEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
    for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
    {
      const double nodalPressure = itNode->FastGetSolutionStepValue(PRESSURE);
      NormP += nodalPressure * nodalPressure;
    }
  }
  BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
  NormP = sqrt(NormP);
  if (NormP == 0.0)
    NormP = 1.00;
  return NormP;
}
// Final momentum-convergence check for the last nonlinear iteration. Flags
// BAD_VELOCITY_CONVERGENCE when the error is unacceptable (or NaN); when it is
// very bad (> 0.05) the velocity/pressure/acceleration fields are rolled back
// to the previous step and fixedTimeStep is set so the caller reduces dt.
// Returns true only when the error is below mVelocityTolerance.
bool FixTimeStepMomentum(const double DvErrorNorm, bool &fixedTimeStep)
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
  const double currentTime = rCurrentProcessInfo[TIME];
  const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
  double minTolerance = 0.005;
  bool converged = false;
  // Relax the tolerance during the first ten steps of the simulation.
  if (currentTime < 10 * timeInterval)
  {
    minTolerance = 10;
  }
  // "DvErrorNorm != DvErrorNorm" is a NaN check (true only for NaN).
  // An always-false clause "(DvErrorNorm < 0 && DvErrorNorm > 0)" was removed.
  if ((DvErrorNorm > minTolerance || DvErrorNorm != DvErrorNorm) &&
      DvErrorNorm != 0 &&
      (DvErrorNorm != 1 || currentTime > timeInterval))
  {
    rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
    std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
    minTolerance = 0.05;
    if (DvErrorNorm > minTolerance)
    {
      std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
      fixedTimeStep = true;
      // Roll every node back to the previous step's solution.
#pragma omp parallel
      {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
          itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
          itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
          itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
        }
      }
    }
  }
  else
  {
    rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    if (DvErrorNorm < mVelocityTolerance)
    {
      converged = true;
    }
  }
  return converged;
}
// Momentum-convergence check for intermediate nonlinear iterations. When the
// error is essentially 1 (or NaN) the iteration has diverged: the fields are
// rolled back to the previous step, fixedTimeStep is set and
// BAD_VELOCITY_CONVERGENCE is flagged. Returns true when the error is below
// mVelocityTolerance.
bool CheckMomentumConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
  const double currentTime = rCurrentProcessInfo[TIME];
  const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
  const double minTolerance = 0.99999;
  bool converged = false;
  // "DvErrorNorm != DvErrorNorm" is a NaN check (true only for NaN).
  // An always-false clause "(DvErrorNorm < 0 && DvErrorNorm > 0)" was removed.
  if ((DvErrorNorm > minTolerance || DvErrorNorm != DvErrorNorm) &&
      DvErrorNorm != 0 &&
      (DvErrorNorm != 1 || currentTime > timeInterval))
  {
    rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
    std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
    std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
    fixedTimeStep = true;
    // Roll every node back to the previous step's solution.
#pragma omp parallel
    {
      ModelPart::NodeIterator NodeBegin;
      ModelPart::NodeIterator NodeEnd;
      OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
      for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
      {
        itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
        itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
        itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
      }
    }
  }
  else
  {
    rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    if (DvErrorNorm < mVelocityTolerance)
    {
      converged = true;
    }
  }
  return converged;
}
// Final continuity-convergence check for the last iteration. An unacceptable
// (or NaN) error sets fixedTimeStep so the caller reduces dt; a catastrophic
// error (> 0.9999) additionally rolls the fields back to the previous step.
// Returns true only when the error is below mPressureTolerance.
// NOTE(review): BAD_PRESSURE_CONVERGENCE is unconditionally reset to false at
// the end (the "true" branch is commented out) — confirm this is intended.
bool FixTimeStepContinuity(const double DvErrorNorm, bool &fixedTimeStep)
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
  const double currentTime = rCurrentProcessInfo[TIME];
  const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
  double minTolerance = 0.01;
  bool converged = false;
  // Relax the tolerance during the first ten steps of the simulation.
  if (currentTime < 10 * timeInterval)
  {
    minTolerance = 10;
  }
  // "DvErrorNorm != DvErrorNorm" is a NaN check (true only for NaN).
  // An always-false clause "(DvErrorNorm < 0 && DvErrorNorm > 0)" was removed.
  if ((DvErrorNorm > minTolerance || DvErrorNorm != DvErrorNorm) &&
      DvErrorNorm != 0 &&
      (DvErrorNorm != 1 || currentTime > timeInterval))
  {
    fixedTimeStep = true;
    // rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
    if (DvErrorNorm > 0.9999)
    {
      rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
      // Log message corrected: it previously claimed "higher than 0.1" while
      // the threshold actually checked is 0.9999.
      std::cout << " BAD PRESSURE CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
      std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
      // Roll every node back to the previous step's solution.
#pragma omp parallel
      {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
          itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
          itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
          itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
        }
      }
    }
  }
  else if (DvErrorNorm < mPressureTolerance)
  {
    converged = true;
    fixedTimeStep = false;
  }
  rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
  return converged;
}
bool CheckContinuityConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
bool converged = false;
if (DvErrorNorm < mPressureTolerance)
{
converged = true;
fixedTimeStep = false;
}
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
return converged;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
double mVelocityTolerance;
double mPressureTolerance;
unsigned int mMaxPressureIter;
unsigned int mDomainSize;
unsigned int mTimeOrder;
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Reads the solver configuration and wires up the momentum (velocity) and
// pressure solution strategies; throws if either strategy is missing.
virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
KRATOS_TRY;
mTimeOrder = rSolverConfig.GetTimeOrder();
// Check that input parameters are reasonable and sufficient.
this->Check();
//ModelPart& rModelPart = this->GetModelPart();
mDomainSize = rSolverConfig.GetDomainSize();
mReformDofSet = rSolverConfig.GetReformDofSet();
BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
// Initialize strategies for each step
bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);
if (HaveVelStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
/* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
}
bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);
if (HavePressStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
}
// Check input parameters
// NOTE(review): Check() was already called at the top of this method;
// this second call is presumably redundant - confirm before removing.
this->Check();
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// NOTE(review): declared private to forbid assignment, but the empty body
// falls off the end of a reference-returning function - undefined behavior
// if ever invoked. Consider declaring without a definition (pre-C++11 idiom).
TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther) {}
/// Copy constructor.
// Private and empty: copying is intentionally disabled.
TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {}
///@}
}; /// Class TwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
|
omp_orphan.c | /******************************************************************************
* FILE: omp_orphan.c
* DESCRIPTION:
* OpenMP Example - Parallel region with an orphaned directive - C/C++ Version
* This example demonstrates a dot product being performed by an orphaned
* loop reduction construct. Scoping of the reduction variable is critical.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 06/30/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100
float a[VECLEN], b[VECLEN], sum;
/*
 * Orphaned OpenMP worksharing construct: the enclosing "parallel" region is
 * opened in main(). Accumulates the dot product of the global vectors a and b
 * into the global reduction variable "sum".
 * Bug fix: the function is declared to return float but had no return
 * statement (undefined behavior in C when the value is used); it now returns
 * the accumulated sum.
 */
float dotprod ()
{
int i,tid;
tid = omp_get_thread_num();
#pragma omp for reduction(+:sum)
for (i=0; i < VECLEN; i++) {
sum = sum + (a[i]*b[i]);
printf(" tid= %d i=%d\n",tid,i);
}
return sum;
}
/*
 * Driver: fills a[i] = b[i] = i, then computes the dot product inside a
 * parallel region via the orphaned worksharing loop in dotprod().
 * Improvement: explicit return status added.
 */
int main (int argc, char *argv[])
{
int i;
for (i=0; i < VECLEN; i++)
a[i] = b[i] = 1.0 * i;
sum = 0.0;
/* Every thread calls dotprod(); the omp-for inside splits the iterations. */
#pragma omp parallel
dotprod();
printf("Sum = %f\n",sum);
return 0;
}
|
TagList.h | //
// Created by lopea on 5/29/20.
//
#ifndef GIGAENGINE_TAGLIST_H
#define GIGAENGINE_TAGLIST_H
#include <algorithm>
#include <cstdint>
#include <deque>
#include <set>
#include <vector>
typedef uint64_t Entity;
// Type-erased base class so heterogeneous TagList<T> instances can be stored
// and destroyed polymorphically through a common pointer.
class GenericTagList
{
public:
virtual ~GenericTagList() = default;
};
// Stores the set of entities carrying tag T. The tag type T is purely a
// compile-time discriminator; no values of type T are stored.
template<typename T>
class TagList : public GenericTagList
{
public:
// Append an entity (thread-safe via an OpenMP critical section).
void AddTag(Entity entity);
// Linear search; true when the entity is present.
bool HasTag(Entity entity) const;
// Remove the first occurrence of the entity, if any.
void RemoveTag(Entity entity);
// Remove every entity contained in the given vector.
void RemoveTags(std::vector<Entity>& entities);
void clear();
bool empty() const;
// Sorted intersection of this list with `reference`.
std::vector<Entity> GetOverlappingEntities(const std::vector<Entity>& reference);
~TagList();
private:
std::deque<Entity> entities_;
// True while entities_ is known to be sorted (lazy sort performed in
// GetOverlappingEntities; any AddTag invalidates it).
bool sorted_ = false;
};
// Appends the entity to the list.
// The OpenMP critical section guards concurrent calls: std::deque is not
// thread-safe for simultaneous mutation.
template<typename T>
void TagList<T>::AddTag(Entity entity)
{
#pragma omp critical
{
entities_.push_back(entity);
sorted_ = false; // appended entity may break the sorted order
}
}
// Destructor: releases all stored entities.
template<typename T>
TagList<T>::~TagList()
{
clear();
}
// Returns true when the entity has previously been tagged (linear scan).
template<typename T>
bool TagList<T>::HasTag(Entity entity) const
{
const auto last = entities_.end();
return std::find(entities_.begin(), last, entity) != last;
}
// Erases the first occurrence of the entity, if present; the relative order
// of the remaining entries is preserved.
template<typename T>
void TagList<T>::RemoveTag(Entity entity)
{
const auto pos = std::find(entities_.begin(), entities_.end(), entity);
if (pos == entities_.end())
{
return;
}
entities_.erase(pos);
}
// Removes all entities from the list.
template<typename T>
void TagList<T>::clear()
{
entities_.clear();
}
// True when no entity carries this tag.
template<typename T>
bool TagList<T>::empty() const
{
return entities_.empty();
}
// Returns the entities present in both this list and `reference`.
// Lazily sorts the internal deque first. NOTE(review): std::set_intersection
// requires BOTH input ranges to be sorted; `reference` is assumed to be
// pre-sorted by the caller - confirm this contract at the call sites.
template<typename T>
std::vector<Entity> TagList<T>::GetOverlappingEntities(const std::vector<Entity> &reference)
{
std::vector<Entity> result;
if(!sorted_)
{
std::sort(entities_.begin(),entities_.end());
sorted_ = true;
}
std::set_intersection(entities_.begin(), entities_.end(),reference.begin(),reference.end(), std::back_inserter(result));
return result;
}
// Removes every entity in the given vector from this tag list.
// Bug fix: the loop previously passed the whole vector (`entities`) to
// RemoveTag instead of the current element (`entity`), which is a type error
// and removed nothing.
template<typename T>
void TagList<T>::RemoveTags(std::vector<Entity> &entities)
{
for(Entity entity : entities)
{
RemoveTag(entity);
}
}
#endif //GIGAENGINE_TAGLIST_H
|
GB_binop__times_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int16)
// A*D function (colscale): GB (_AxD__times_int16)
// D*A function (rowscale): GB (_DxB__times_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int16)
// C=scalar+B GB (_bind1st__times_int16)
// C=scalar+B' GB (_bind1st_tran__times_int16)
// C=A+scalar GB (_bind2nd__times_int16)
// C=A'+scalar GB (_bind2nd_tran__times_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT16 || GxB_NO_TIMES_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; loop body supplied by the
// included template, using GB_BINOP (z = x*y) defined above.
void GB (_Cdense_ewise3_accum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator, body supplied
// by the included template.
void GB (_Cdense_ewise3_noaccum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__times_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above always returns); artifact of the
// code generator - harmless.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B (set union pattern).
// alpha/beta scalars are only read for the eWiseUnion variant.
GrB_Info GB (_AaddB__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (set intersection pattern) where C is sparse/hypersparse.
GrB_Info GB (_AemultB_08__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// TIMES is commutative (GB_BINOP_FLIP is 0 above), so the flipxy parameter
// never requires the flipped template here.
GrB_Info GB (_AemultB_02__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_04__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__times_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for all entries present in B (bind the scalar as the
// first operand). Bb is B's bitmap (NULL means all entries present).
GrB_Info GB (_bind1st__times_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x * bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for all entries present in A (bind the scalar as the
// second operand). Ab is A's bitmap (NULL means all entries present).
GrB_Info GB (_bind2nd__times_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x * aij (scalar bound first),
// via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__times_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code (generator artifact; same value)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply cij = aij * y (scalar bound second),
// via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sum_array2.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// read timer in second
// Return wall-clock time in seconds since the epoch (sub-second resolution).
// Improvement: replaces the obsolete ftime()/struct timeb interface (removed
// from POSIX.1-2008) with C11 timespec_get, keeping the same epoch-seconds
// semantics at higher resolution.
double read_timer() {
struct timespec ts;
timespec_get(&ts, TIME_UTC);
return (double) ts.tv_sec + (double) ts.tv_nsec / 1.0e9;
}
//Create a matrix and a vector and fill with random numbers
//Create a vector of N elements and fill with random numbers in [0, 10).
// X must point to at least N floats; rand() is seeded by the caller.
void init(float *X) {
for (int i = 0; i<N; i++) {
X[i] = (float)rand()/(float)(RAND_MAX/10.0);
}
}
//Our sum function- what it does is pretty straight-forward.
// Element-wise vector add: answer[i] = X[i] + Y[i] for i in [0, N),
// vectorized with OpenMP SIMD.
// NOTE(review): despite the name, no sum is computed; `result` stays 0 and
// is returned unused by the callers - presumably vestigial. Confirm before
// changing the return value.
float sum(float *X, float *Y, float *answer) {
float result = 0;
#pragma omp simd
for (int i = 0; i<N; i++) {
answer[i] = X[i] + Y[i];
}
return result;
}
// Debug functions
// Serial reference implementation of sum(): element-wise add of X and Y
// into answer. The float return value mirrors sum() and is always 0.
float sum_serial(float *X, float *Y, float *answer) {
float result = 0;
int i = 0;
while (i < N) {
answer[i] = X[i] + Y[i];
++i;
}
return result;
}
// Print the first 8 elements of the vector, two decimals each, wrapped in
// square brackets.
void print_vector(float *vector) {
printf("[");
int i;
for (i = 0; i < 8; ++i)
printf("%.2f ", vector[i]);
puts("]");
}
// Benchmark driver: times N_RUNS element-wise vector additions with the SIMD
// and serial implementations and reports runtime and GFLOPS for each.
// Fixes: (1) malloc results are now checked; (2) the GFLOPS formula
// previously used (2.0*N)*N*N_RUNS, overcounting by a factor of 2N - one run
// performs exactly N additions, so total FLOPs = N * N_RUNS.
int main(int argc, char **argv) {
//Set everything up
float *X = malloc(sizeof(float)*N);
float *Y = malloc(sizeof(float)*N);
float *answer = malloc(sizeof(float)*N);
float *answer_serial = malloc(sizeof(float)*N);
if (X == NULL || Y == NULL || answer == NULL || answer_serial == NULL) {
fprintf(stderr, "allocation failed\n");
free(X);
free(Y);
free(answer);
free(answer_serial);
return 1;
}
srand(time(NULL));
init(X);
init(Y);
double start = read_timer();
for (int i = 0; i<N_RUNS; i++)
sum(X, Y, answer);
double t = (read_timer() - start);
double start_serial = read_timer();
for (int i = 0; i<N_RUNS; i++)
sum_serial(X, Y, answer_serial);
double t_serial = (read_timer() - start_serial);
print_vector(X);
puts("+");
print_vector(Y);
puts("=\n");
printf("SIMD:\n");
print_vector(answer);
puts("---------------------------------");
printf("Serial:\n");
print_vector(answer_serial);
// N additions per run, N_RUNS runs.
double gflops = (1.0 * N * N_RUNS) / (1.0e9 * t);
double gflops_serial = (1.0 * N * N_RUNS) / (1.0e9 * t_serial);
printf("==================================================================\n");
printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
printf("------------------------------------------------------------------\n");
printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
free(X);
free(Y);
free(answer);
free(answer_serial);
return 0;
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
// Surrounds the image with a flat border of the image's border color by
// delegating to FrameImage() with zero bevels. Returns a new image, or NULL
// on failure; the input image is not modified.
MagickExport Image *BorderImage(const Image *image,
const RectangleInfo *border_info,ExceptionInfo *exception)
{
Image
*border_image,
*clone_image;
FrameInfo
frame_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(border_info != (RectangleInfo *) NULL);
// Border is added on all four sides, hence width/height grow by 2x.
frame_info.width=image->columns+(border_info->width << 1);
frame_info.height=image->rows+(border_info->height << 1);
frame_info.x=(ssize_t) border_info->width;
frame_info.y=(ssize_t) border_info->height;
// Zero bevels: a flat border rather than a 3D frame.
frame_info.inner_bevel=0;
frame_info.outer_bevel=0;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
// FrameImage paints with matte_color, so temporarily substitute the
// border color, then restore the matte color on the result.
clone_image->matte_color=image->border_color;
border_image=FrameImage(clone_image,&frame_info,exception);
clone_image=DestroyImage(clone_image);
if (border_image != (Image *) NULL)
border_image->matte_color=image->matte_color;
return(border_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
accentuate,
border,
highlight,
interior,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
width=frame_info->width-frame_info->x-bevel_width;
height=frame_info->height-frame_info->y-bevel_width;
if ((width < image->columns) || (height < image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
{
InheritException(exception,&frame_image->exception);
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if (frame_image->matte_color.opacity != OpaqueOpacity)
frame_image->matte=MagickTrue;
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
GetMagickPixelPacket(frame_image,&interior);
SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
&interior);
GetMagickPixelPacket(frame_image,&matte);
matte.colorspace=RGBColorspace;
SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
&matte);
GetMagickPixelPacket(frame_image,&border);
border.colorspace=RGBColorspace;
SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
&border);
GetMagickPixelPacket(frame_image,&accentuate);
accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&highlight);
highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&shadow);
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&trough);
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.opacity=matte.opacity;
if (image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&interior);
ConvertRGBToCMYK(&matte);
ConvertRGBToCMYK(&border);
ConvertRGBToCMYK(&accentuate);
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&shadow);
ConvertRGBToCMYK(&trough);
}
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
frame_view=AcquireCacheView(frame_image);
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register IndexPacket
*restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
if (q != (PixelPacket *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
width=image->columns+((size_t) frame_info->inner_bevel << 1)-
y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
/*
Set frame interior to interior color.
*/
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(frame_image,&interior,q,frame_indexes);
q++;
frame_indexes++;
}
else
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
(void) CopyMagickMemory(q,p,image->columns*sizeof(*p));
if ((image->colorspace == CMYKColorspace) &&
(frame_image->colorspace == CMYKColorspace))
{
(void) CopyMagickMemory(frame_indexes,indexes,image->columns*
sizeof(*indexes));
frame_indexes+=image->columns;
}
q+=image->columns;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FrameImage)
#endif
proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register IndexPacket
*restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (PixelPacket *) NULL)
{
/*
Draw bottom of ornamental border.
*/
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
{
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
(void) CompositeImage(frame_image,image->compose,image,x,y);
}
return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  /*
    The bevel must fit twice (left+right, top+bottom) inside the image.
  */
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /*
    raise != MagickFalse lightens top/left edges; otherwise the colors are
    swapped to simulate a lowered (pressed) effect.
  */
  foreground=(Quantum) QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=(Quantum) QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.  Pass 1: top bevel rows (highlight / accentuate / shadow).
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < y; x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Serialize the shared progress counter; previously only the last
          loop guarded it, leaving a data race on progress++ here.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Pass 2: middle rows -- left highlight bevel, untouched interior,
    right shadow bevel.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q++;
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Serialize the shared progress counter (see pass 1).
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Pass 3: bottom bevel rows (highlight / trough / shadow).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetRedPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetRedPixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetGreenPixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetGreenPixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetBluePixelComponent(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetBluePixelComponent(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
rose_matrixmultiply2.c | /*
Naive matrix-matrix multiplication(mmm)
By C. Liao
*/
#define N 1000
#define M 1000
#define K 1000
#include "omp.h"
int i;
int j;
int k;
double a[1000][1000];
double b[1000][1000];
double c[1000][1000];
/*
  Naive matrix-matrix multiply: c += a * b over the fixed 1000x1000 global
  arrays declared above.  Returns 0.

  The j loop previously carried its own "#pragma omp parallel for"; nested
  inside the already-parallel i loop it created a nested parallel region per
  (i,k) iteration -- pure overhead when nested parallelism is enabled and a
  no-op otherwise -- so it was removed.  The i-k-j loop order is kept: the
  innermost j index walks rows of b and c contiguously.
*/
int mmm()
{
#pragma omp parallel for private (i,j,k)
  for (i = 0; i <= 999; i += 1) {
    for (k = 0; k <= 999; k += 1) {
      for (j = 0; j <= 999; j += 1) {
        c[i][j] = c[i][j] + a[i][k] * b[k][j];
      }
    }
  }
  return 0;
}
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include <limits>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda/utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
// Device compilation: pi lives in CUDA constant memory.
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
// Host build: import std::isnan so unqualified isnan resolves as it does in
// device code.
using std::isnan;
#endif
// Number of threads used to process N elements; specialized for cpu and gpu
// below.
template <typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
/*! \brief Grid-stride loop over [0, n): each thread starts at its global
    index and strides by the total thread count (blockDim.x * gridDim.x),
    so any launch configuration covers all n iterations. */
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
/*! \brief Query the properties of the CUDA device currently bound to the
    calling host thread. */
inline cudaDeviceProp cuda_get_device_prop() {
  int dev_id = 0;
  CUDA_CALL(cudaGetDevice(&dev_id));
  cudaDeviceProp prop;
  CUDA_CALL(cudaGetDeviceProperties(&prop, dev_id));
  return prop;
}
/*!
 * \brief Number of thread blocks needed to cover N elements, capped at the
 *        maximum grid size mshadow permits.
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  // Ceiling division by the base block size, then clamp to the grid limit.
  const int blocks_needed = (N + kBaseThreadNum - 1) / kBaseThreadNum;
  return std::min(kMaxGridNum, blocks_needed);
}
/*! \brief GPU specialization: total thread count of the launch that would
    cover N elements (blocks for N, each kBaseThreadNum wide). */
template <>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  const int num_blocks = cuda_get_num_blocks(N);
  return num_blocks * kBaseThreadNum;
}
#endif // __CUDACC__
/*! \brief CPU specialization: the engine's recommended OpenMP thread count;
    the element count N is not used. */
template <>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: { \
const OpReqType ReqType = kWriteTo; \
{ __VA_ARGS__ } \
} break; \
case kAddTo: { \
const OpReqType ReqType = kAddTo; \
{ __VA_ARGS__ } \
} break; \
default: \
break; \
}
/*! \brief operator request type switch */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: { \
const OpReqType ReqType = kNullOp; \
{ __VA_ARGS__ } \
} break; \
case kWriteInplace: \
case kWriteTo: { \
const OpReqType ReqType = kWriteTo; \
{ __VA_ARGS__ } \
} break; \
case kAddTo: { \
const OpReqType ReqType = kAddTo; \
{ __VA_ARGS__ } \
} break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{ __VA_ARGS__ } \
} else if (NDim == 2) { \
const int ndim = 2; \
{ __VA_ARGS__ } \
} else if (NDim == 3) { \
const int ndim = 3; \
{ __VA_ARGS__ } \
} else if (NDim == 4) { \
const int ndim = 4; \
{ __VA_ARGS__ } \
} else if (NDim == 5) { \
const int ndim = 5; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{ __VA_ARGS__ } \
} else if (NDim == 2) { \
const int ndim = 2; \
{ __VA_ARGS__ } \
} else if (NDim == 3) { \
const int ndim = 3; \
{ __VA_ARGS__ } \
} else if (NDim == 4) { \
const int ndim = 4; \
{ __VA_ARGS__ } \
} else if (NDim == 5) { \
const int ndim = 5; \
{ __VA_ARGS__ } \
} else if (NDim == 6) { \
const int ndim = 6; \
{ __VA_ARGS__ } \
} else if (NDim == 7) { \
const int ndim = 7; \
{ __VA_ARGS__ } \
} else if (NDim == 8) { \
const int ndim = 8; \
{ __VA_ARGS__ } \
} else if (NDim == 9) { \
const int ndim = 9; \
{ __VA_ARGS__ } \
} else if (NDim == 10) { \
const int ndim = 10; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: \
case mshadow::kBfloat16: { \
typedef mshadow::half::half_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Type switch over float and integer types that rejects bfloat16.
    Fixed: the kInt8 case previously bound DType to int32_t, mis-typing int8
    tensor data; it now binds int8_t, consistent with the sibling type-switch
    macros in this file.
    NOTE(review): kUint8 falls through to the "Unknown type enum" fatal
    branch here while sibling macros accept it -- confirm intent. */
#define MXNET_NO_BFLOAT16_TYPE_SWITCH(type, DType, ...)  \
  switch (type) {                                        \
    case mshadow::kFloat32: {                            \
      typedef float DType;                               \
      { __VA_ARGS__ }                                    \
    } break;                                             \
    case mshadow::kFloat64: {                            \
      typedef double DType;                              \
      { __VA_ARGS__ }                                    \
    } break;                                             \
    case mshadow::kFloat16: {                            \
      typedef mshadow::half::half_t DType;               \
      { __VA_ARGS__ }                                    \
    } break;                                             \
    case mshadow::kBfloat16:                             \
      LOG(FATAL) << "This operation does not "           \
                    "support bfloat16";                  \
      break;                                             \
    case mshadow::kInt8: {                               \
      typedef int8_t DType;                              \
      { __VA_ARGS__ }                                    \
    } break;                                             \
    case mshadow::kInt32: {                              \
      typedef int32_t DType;                             \
      { __VA_ARGS__ }                                    \
    } break;                                             \
    case mshadow::kInt64: {                              \
      typedef int64_t DType;                             \
      { __VA_ARGS__ }                                    \
    } break;                                             \
    default:                                             \
      LOG(FATAL) << "Unknown type enum " << type;        \
  }
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Maps a data type to the type used for accumulating values of it.
    Default: accumulate in the data type itself. */
template <typename T>
struct AccType {
  using type = T;
};
/*! \brief Half-precision values accumulate in float. */
template <>
struct AccType<mshadow::half::half_t> {
  using type = float;
};
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: { \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: { \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} break; \
case mshadow::kInt8: { \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} break; \
case mshadow::kInt32: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} break; \
case mshadow::kInt64: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} break; \
case mshadow::kBool: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not bool"; \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: { \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
typedef uint32_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
typedef int32_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
typedef int64_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
typedef int64_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
typedef bool DType; \
typedef int64_t AType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_INT_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} break; \
case mshadow::kFloat64: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} break; \
case mshadow::kFloat16: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
typedef bool DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Extended integer/bool type switch; floating-point enums are fatal
    errors.  Fixed the float16 diagnostic, which read "integer and boo;
    types" instead of "integer and bool types". */
#define MXNET_INT_TYPE_SWITCH_EXT_WITH_BOOL(type, DType, ...)  \
  switch (type) {                                              \
    case mshadow::kFloat32: {                                  \
      LOG(FATAL) << "This operation only support "             \
                    "integer and bool types, not float32";     \
    } break;                                                   \
    case mshadow::kFloat64: {                                  \
      LOG(FATAL) << "This operation only support "             \
                    "integer and bool types, not float64";     \
    } break;                                                   \
    case mshadow::kFloat16: {                                  \
      LOG(FATAL) << "This operation only support "             \
                    "integer and bool types, not float16";     \
    } break;                                                   \
    case mshadow::kUint8: {                                    \
      typedef uint8_t DType;                                   \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kInt8: {                                     \
      typedef int8_t DType;                                    \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kInt32: {                                    \
      typedef int32_t DType;                                   \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kInt64: {                                    \
      typedef int64_t DType;                                   \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kInt16: {                                    \
      typedef int16_t DType;                                   \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kUint16: {                                   \
      typedef uint16_t DType;                                  \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kUint32: {                                   \
      typedef uint32_t DType;                                  \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kUint64: {                                   \
      typedef uint64_t DType;                                  \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case mshadow::kBool: {                                     \
      typedef bool DType;                                      \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    default:                                                   \
      LOG(FATAL) << "Unknown type enum " << type;              \
  }
#define MXNET_INT_TYPE_SWITCH_EXT(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} break; \
case mshadow::kFloat64: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} break; \
case mshadow::kFloat16: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt16: { \
typedef int16_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint16: { \
typedef uint16_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint32: { \
typedef uint32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint64: { \
typedef uint64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
LOG(FATAL) << "This operation only support " \
"integer types, not bool type"; \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} break; \
case mshadow::kFloat64: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} break; \
case mshadow::kFloat16: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} break; \
case mshadow::kUint8: { \
LOG(FATAL) << "This operation only support " \
"integer types, not uint8"; \
} break; \
case mshadow::kInt8: { \
LOG(FATAL) << "This operation only support " \
"integer types, not int8"; \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
LOG(FATAL) << "This operation only support " \
"integer types, not bool"; \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: { \
typedef mshadow::half::half_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool)
#define MXNET_ADD_ALL_TYPES_EXT \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("int16", mshadow::kInt16) \
.add_enum("uint16", mshadow::kUint16) \
.add_enum("uint32", mshadow::kUint32) \
.add_enum("uint64", mshadow::kUint64)
#define MXNET_ADD_ALL_TYPES_EXT_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool) \
.add_enum("int16", mshadow::kInt16) \
.add_enum("uint16", mshadow::kUint16) \
.add_enum("uint32", mshadow::kUint32) \
.add_enum("uint64", mshadow::kUint64)
/* \brief Flatten a coordinate tuple into a linear index for `shape`.
   A coordinate at or beyond its dimension's extent contributes 0 on that
   axis -- presumably to tolerate broadcast dims of extent 1; confirm. */
template <int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t flat = 0;
#pragma unroll
  for (int d = 0; d < ndim; ++d) {
    const index_t axis = (coord[d] < shape[d]) ? coord[d] : 0;
    flat = flat * shape[d] + axis;
  }
  return flat;
}
/* Convert a flat index back into per-dimension coordinates for `shape`. */
template <int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  index_t remaining = idx;
#pragma unroll
  for (index_t d = ndim - 1; d >= 0; --d) {
    const index_t quot = remaining / shape[d];
    coord[d] = remaining - quot * shape[d];  // remainder, reusing the quotient
    remaining = quot;
  }
  return coord;
}
/* Dot product of a coordinate vector with a stride vector. */
template <int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t total = 0;
#pragma unroll
  for (int d = 0; d < ndim; ++d)
    total += coord[d] * stride[d];
  return total;
}
/* Fused unravel(idx, shape) followed by dot(., stride), without
 * materializing the intermediate coordinate vector. */
template <int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx,
                                    const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  index_t offset = 0;
  index_t remaining = idx;
#pragma unroll
  for (index_t dim = ndim - 1; dim >= 0; --dim) {
    // remaining % shape[dim] is this dimension's coordinate.
    offset += (remaining % shape[dim]) * stride[dim];
    remaining /= shape[dim];
  }
  return offset;
}
/* Derive the row-major stride of each dimension from `shape`.
 * Dimensions of extent <= 1 receive stride 0 (so repeated reads of a
 * size-1 axis map to the same element, enabling broadcasting). */
template <int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t running = 1;
#pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    if (shape[dim] > 1) {
      stride[dim] = running;
    } else {
      stride[dim] = 0;
    }
    running *= shape[dim];
  }
  return stride;
}
/* Increment coordinates odometer-style (last dimension varies fastest).
 * Returns true while the incremented coordinate is still inside `shape`,
 * false once the first dimension has overflowed. */
template <int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  // Bump the fastest-varying digit...
  ++(*coord)[ndim - 1];
  // ...then propagate carries toward dimension 0 while a digit overflows
  // its extent (the overflow test is folded into the loop condition).
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i - 1];
  }
  return (*coord)[0] < shape[0];
}
/* Increment coordinates odometer-style and keep the linear index `*idx`
 * (under strides `stride`) in sync with the new coordinate, avoiding a
 * full dot() recomputation per step. */
template <int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord,
                         const Shape<ndim>& shape,
                         index_t* idx,
                         const Shape<ndim>& stride) {
  ++(*coord)[ndim - 1];
  *idx += stride[ndim - 1];
  // On each carry: undo the full extent we walked in dim i and take one
  // step in dim i-1.
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i - 1];
    *idx = *idx + stride[i - 1] - shape[i] * stride[i];
  }
}
/* Increment coordinates odometer-style and keep two linear indices in
 * sync, one per stride vector (e.g. two tensors iterated in lockstep). */
template <int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord,
                         const Shape<ndim>& shape,
                         index_t* idx1,
                         const Shape<ndim>& stride1,
                         index_t* idx2,
                         const Shape<ndim>& stride2) {
  ++(*coord)[ndim - 1];
  *idx1 += stride1[ndim - 1];
  *idx2 += stride2[ndim - 1];
  // Carry propagation mirrors the single-index overload, applied to both
  // index/stride pairs.
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i - 1];
    *idx1 = *idx1 + stride1[i - 1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i - 1] - shape[i] * stride2[i];
  }
}
/*!
 * \brief Simple copy data from one blob to another
 * \param s Stream to perform the copy on
 * \param to Destination blob (its dtype decides the output type)
 * \param from Source blob; must match `to` in element count and device
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu>* s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // Same dtype: plain memory copy.
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Differing dtypes: elementwise cast from SrcDType to DType.
      MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}
/*! \brief Binary op backward gradient OP wrapper */
template <typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad: chain rule `a * GRAD_OP::Map(args...)`
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template <typename DType, typename... Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};
/*! \brief Unary op wrapper where input and output dtypes differ: each input
 * element is converted to the output type OType before OP::Map is applied,
 * and the result is stored per the `req` write-request via KERNEL_ASSIGN. */
template <typename OP, int req>
struct mixed_type_unary_op {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template <typename OType, typename IType>
  MSHADOW_XINLINE static void Map(index_t i, OType* out, const IType* in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(OType(in[i])));
  }
};
/*! \brief Binary op backward gradient OP wrapper (tuned)
 * Same Map as backward_grad, but also derives from `tunable` so the CPU
 * launcher can pick the OMP-tuned code path. */
template <typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 *
 * Every Map() overload applies OP::Map to element i of its input(s) and
 * writes the result into out[i] via KERNEL_ASSIGN, honoring the write
 * request `req`. The SFINAE-restricted overloads below handle mixed-dtype
 * combinations (bool / half_t / float / double outputs).
 */
template <typename OP, int req>
struct op_with_req {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* lhs, const DType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and a scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief input is tensor and two scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* in,
                                  const DType value_1,
                                  const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }
  /*! \brief No inputs (ie fill to constant value) */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }
  /*! \brief input is single scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }
  /*! \brief inputs are two tensors and a scalar value */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* input_1,
                                  const DType* input_2,
                                  const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }
  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* input_1,
                                  const DType* input_2,
                                  const DType* input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
  /*! \brief input is a tensor and the output is a boolean tensor */
  template <typename DType,
            typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors with a boolean output tensor */
  template <typename DType,
            typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* lhs, const DType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is a tensor and a scalar value, with a boolean output tensor */
  template <typename DType,
            typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief input is two tensors with different type and with a boolean output tensor */
  template <typename LType,
            typename RType,
            typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool* out, const LType* lhs, const RType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief inputs are an integral tensor and a half_t tensor, half_t output tensor */
  template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i,
                                  mshadow::half::half_t* out,
                                  const DType* lhs,
                                  const mshadow::half::half_t* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief inputs are a half_t-or-integral tensor and a float tensor, float output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const float* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief inputs are a non-double tensor and a double tensor, double output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_same<DType, float>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double* out, const DType* lhs, const double* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is an integral tensor and a half_t scalar value, half_t output tensor */
  template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i,
                                  mshadow::half::half_t* out,
                                  const DType* lhs,
                                  const mshadow::half::half_t value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
  /*! \brief input is a half_t-or-integral tensor and a float scalar value, float output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const float value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
  /*! \brief input is a non-double tensor and a double scalar value, double output tensor */
  template <typename DType,
            typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                        std::is_same<DType, float>::value ||
                                        std::is_integral<DType>::value,
                                    int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double* out, const DType* lhs, const double value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
  /*! \brief inputs are two integral tensors with a float output tensor */
  template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const DType* rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};
// Primary template; specialized per device below.
template <typename OP, typename xpu>
struct Kernel;
/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template <typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template <typename... Args>
  inline static bool Launch(mshadow::Stream<cpu>*, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Serial fallback when OMP would not help.
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // Parallel path; loop index is signed index_t as required by OpenMP <3.0.
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template <typename... Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu>*, const int64_t N, Args... args) {
#ifdef _OPENMP
    // Note: passes `false` to GetRecommendedOMPThreadCount (different policy
    // than Launch) — presumably disables a thread-count heuristic; verify in
    // engine::OpenMP.
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * (NOTE(review): the original doc mentioned a `dest` parameter, but the
   * signature takes only varargs — DType is supplied explicitly by callers.)
   */
  template <typename PRIMITIVE_OP, typename DType, typename... Args>
  static void LaunchTuned(mshadow::Stream<cpu>*, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // Tuning data for PRIMITIVE_OP/DType decides whether OMP pays off at size N;
    // the actual work is still OP::Map.
    if (omp_threads < 2 ||
        !tuned_op<PRIMITIVE_OP, DType>::UseOMP(N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }
  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * OP::Map here takes (start, length, args...) rather than a single index.
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template <typename... Args>
  inline static void LaunchEx(mshadow::Stream<cpu>* s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // Split [0, N) into omp_threads contiguous chunks (last one may be short).
      const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template <typename DType, typename T = OP, typename... Args>
  static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu>* s, const size_t N, DType* dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }
  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type (its nested Operation must derive from tunable)
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template <typename DType, typename T = OP, typename... Args>
  static MSHADOW_CINLINE
      typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
      Launch(mshadow::Stream<cpu>* s, const size_t N, DType* dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};
#ifdef __CUDACC__
// Generic elementwise GPU kernel: grid-stride loop over [0, N), invoking
// OP::Map once per flat index.
template <typename OP, typename... Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride) {
    OP::Map(idx, args...);
  }
}
// Variant of mxnet_generic_kernel for OPs whose Map takes (start, length, ...);
// each GPU thread processes a run of length 1 per iteration.
template <typename OP, typename... Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride) {
    OP::Map(idx, 1, args...);
  }
}
/*! \brief GPU Kernel launcher specialization */
template <typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel
   * Grid size is ceil(N / kBaseThreadNum) capped at kMaxGridNum; the kernel's
   * internal grid-stride loop covers any remaining indices. No-op when N == 0.
   */
  template <typename... Args>
  inline static void Launch(mshadow::Stream<gpu>* s, int N, Args... args) {
    if (0 == N)
      return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(N, args...);
    // Surfaces launch/config errors from the async kernel launch.
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }
  /*! \brief Launch the (start, length, ...) OP::Map variant; same grid policy. */
  template <typename... Args>
  inline static void LaunchEx(mshadow::Stream<gpu>* s, const int N, Args... args) {
    if (0 == N)
      return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif // __CUDACC__
/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template <int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
/*!
 * \brief Set to immediate boolean value kernel
 * \tparam val Scalar immediate
 */
template <bool val>
struct set_to_bool : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  // NOTE(review): returns int (bool promoted to 0/1), mirroring set_to_int.
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
* \brief Special-case kernel shortcut for setting to true and false
*/
using set_true = set_to_bool<true>;
using set_false = set_to_bool<false>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
GB_binop__pair_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_int64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__pair_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__pair_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pair_int64)
// A*D function (colscale): GB (_AxD__pair_int64)
// D*A function (rowscale): GB (_DxB__pair_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__pair_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = 1
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_INT64 || GxB_NO_PAIR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled at generation time for this operator (see comment below for the
// ops that qualify); kept as a stub by the code generator.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; numeric work is in the template.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// ek-slicing of B to parallelize over B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__pair_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (unreachable: the block above already returned; generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by diagonal matrix D. For the PAIR op every output
// entry is 1 (see GB_BINOP above), so A's values are not read.
GrB_Info GB (_AxD__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by diagonal matrix D (PAIR op: all output values are 1).
GrB_Info GB (_DxB__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns).
GrB_Info GB (_AaddB__pair_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix ek-slicing workspace, released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns);
// numeric phase is in GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__pair_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for this operator, so only the unflipped path compiles.
GrB_Info GB (_AemultB_02__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__pair_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Disabled at generation time: no bind1st apply kernel is emitted for this
// operator (stub kept by the code generator).
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t  x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ; ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// Disabled at generation time: no bind2nd apply kernel is emitted for this
// operator (stub kept by the code generator).
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t  y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ; ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled at generation time: no "C = op (x, A')" transpose-apply kernel is
// emitted for this operator (stub kept by the code generator).
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ; ;                     \
    Cx [pC] = 1 ;           \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled at generation time: no "C = op (A', y)" transpose-apply kernel is
// emitted for this operator (stub kept by the code generator).
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ; ;                     \
    Cx [pC] = 1 ;           \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
#endif
|
main.c | #include <malloc.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "MyUtils.h"
// number of worker threads (set from the command line)
int n_threads = 1;
// command-line argument positions
#define THREAD_NUM_ARG 1
#define DIR_ARG 2
#define DICT_ARG 3
#define RES_ARG 4
// buffer for reading the dictionary file contents
#define DICT_FILE_BUFFER_SIZE 1000000
char dict_file_buffer[DICT_FILE_BUFFER_SIZE];
// Entry point: build a word dictionary, compute a document vector for every
// file in a directory (in parallel with OpenMP), and write the results.
// Usage: prog <n_threads> <dir> <dict> <results>
int main(int argc, char *argv[]) {
  int dict_size;     // dictionary size (number of words in the dictionary)
  int file_count;    // total number of files in the target directory
  char **filenames;  // filenames of all files in the target directory
  int **vectors;     // document vectors, one per file
  // validate command-line arguments
  if (argc != 5) {
    printf("程序需要输入4个参数,用法如下:\n");
    printf("%s <n_threads> <dir> <dict> <results>\n", argv[0]);
    exit(-1);
  }
  // parse the requested thread count
  n_threads = atoi(argv[THREAD_NUM_ARG]);
  // configure OpenMP to use it
  omp_set_num_threads(n_threads);
  // start timing
  double ts = omp_get_wtime();
  /* ++++++++++ main work ++++++++++ */
  // build the word hash from the dictionary file contents
  readAll(dict_file_buffer, argv[DICT_ARG]);
  dict_size = make_dict_Hash(dict_file_buffer);
  // get the list of filenames in the target directory
  file_count = get_names(argv[DIR_ARG], &filenames);
  // allocate space for every file's document vector
  vectors = (int **)calloc(file_count, sizeof(int *));
#pragma omp parallel for
  for (int i = 0; i < file_count; ++i) {
    vectors[i] = (int *)calloc(dict_size, sizeof(int));
    // read the file and build its document vector
    make_profile(filenames[i], dict_size, vectors[i]);
  }
  // write the results file
  write_profiles(argv[RES_ARG], file_count, dict_size, filenames, vectors);
  // stop timing and report
  double te = omp_get_wtime();
  printf("Time:%f s\n", te - ts);
} |
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class are used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned a unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
  Define declarations.
*/
#define MaxDimension 3 /* number of color components considered (R, G, B) */
#define DeltaTau 0.5f /* step by which tau shrinks per scale-space iteration */
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
/* NOTE(review): the trailing ';' expands into the caller's statement
   (`sum+=SegmentPower(ratio);` becomes `...;;`) — harmless but sloppy. */
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f /* initial tau for the scale-space filter */
/*
  Typedef declarations.
*/
/* Extent of one color component of a class: histogram boundaries plus a
   running sum that is later normalized into the class center. */
typedef struct _ExtentPacket
{
  double
    center; /* accumulated component sum; divided by count to get the mean */
  ssize_t
    index, /* scan cursor into the extrema array (DefineRegion state) */
    left, /* left histogram boundary of the peak region */
    right; /* right histogram boundary of the peak region */
} ExtentPacket;
/* One candidate class: a hexahedron in RGB space with a member count. */
typedef struct _Cluster
{
  struct _Cluster
    *next; /* singly-linked list of clusters */
  ExtentPacket
    red,
    green,
    blue;
  ssize_t
    count, /* number of pixels assigned to this cluster */
    id; /* colormap index assigned after thresholding */
} Cluster;
/* Node of the scale-space interval tree built over zero crossings. */
typedef struct _IntervalTree
{
  double
    tau; /* scale at which this interval was found */
  ssize_t
    left,
    right; /* interval boundaries on the histogram axis */
  double
    mean_stability, /* average stability of the direct children */
    stability; /* tau distance to the first child (0 for leaves) */
  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;
/* Smoothed histogram and its second-derivative zero crossings at one tau. */
typedef struct _ZeroCrossing
{
  double
    tau,
    histogram[256];
  short
    crossings[256]; /* sign of the 2nd derivative at each crossing, else 0 */
} ZeroCrossing;
/*
  Constant declarations.
*/
static const int
  Blue = 2,
  Green = 1,
  Red = 0,
  SafeMargin = 3, /* slack added around cluster extents when matching pixels */
  TreeLength = 600; /* capacity of the flattened interval-tree node list */
/*
  Method prototypes.
*/
static double
  OptimalTau(const ssize_t *,const double,const double,const double,
    const double,short *);
static ssize_t
  DefineRegion(const short *,ExtentPacket *);
static void
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const double,double *),
  ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const double cluster_threshold,
  const double weighting_exponent,const MagickBooleanType verbose,
  ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
  CacheView
    *image_view;
  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;
  ExtentPacket
    blue,
    green,
    red;
  MagickOffsetType
    progress;
  double
    *free_squares;
  MagickStatusType
    status;
  register ssize_t
    i;
  register double
    *squares;
  size_t
    number_clusters;
  ssize_t
    count,
    y;
  /*
    Form clusters: one candidate class for every combination of red, green,
    and blue histogram peak regions.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        /* NOTE(review): throwing here leaks any clusters already linked
           from head — confirm whether callers tolerate this on OOM. */
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose (margin-padded) RGB extents contain it.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold
    (expressed as a percentage of the total classified pixel count).
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: normalize the accumulated sums into means and
          assign the next colormap id.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  /* NOTE(review): throwing from here on leaks the cluster list (and,
     further below, the squares buffer) — later upstream versions free
     them before throwing; confirm before relying on this path. */
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: squares[-255..255] caches i*i so the
    fuzzy-membership loop avoids repeated multiplies.
  */
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap: one entry per surviving cluster, colored by the
    cluster's mean.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse-grain classes: threshold each pixel against the cluster
    extents; unmatched pixels fall through to fuzzy c-means below.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *clust;
    register const PixelInfo
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,0,q);
      for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
      {
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
             (clust->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
             (clust->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
             (clust->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
             (clust->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
             (clust->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
             (clust->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) clust->id,q);
            break;
          }
      }
      if (clust == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;
          register ssize_t
            j,
            k;
          /*
            Compute fuzzy membership: assign the pixel to the colormap entry
            with the largest membership value 1/sum.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(
              GetPixelRed(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(
                GetPixelRed(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(image,(Quantum) j,q);
              }
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image,exception);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;
  free_squares=squares;
  free_squares=(double *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;
  ssize_t
    center,
    correct,
    count,
    left,
    right;
  /*
    Consolidate zero crossings: walking from the coarsest scale toward the
    finest, move each crossing at scale i to a position (center/left/right,
    taken from scale i+1) that keeps an even number of crossings between
    consecutive intervals.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* NOTE(review): the loop above cannot leave k below 0 (it stops at
         k == 0), so this guard appears to be dead code — kept as-is. */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /* Move the crossing to the corrected position (or drop it when no
         position preserves the even-count property). */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represent the extends
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Start from defaults that span the whole histogram.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Scan forward for the next positive extremum (a maxima); it marks the
    left edge of the region.  Running off the end means no region remains.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Scan forward for the next negative extremum (a minima); the right edge
    is the position immediately before it.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  const ssize_t
    last = 255;

  register ssize_t
    x;

  /*
    Endpoints: one-sided, second-order (three point) polynomial formulas,
    since central differencing has no neighbor beyond the boundary.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[last]=(0.5*histogram[last-2]-2.0*histogram[last-1]+
    1.5*histogram[last]);
  /*
    Interior points: central differencing over the two neighbors.
  */
  for (x=1; x < last; x++)
    derivative[x]=(histogram[x+1]-histogram[x-1])/2.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;
  ExtentPacket
    blue,
    green,
    red;
  MagickBooleanType
    proceed;
  double
    threshold;
  register const Quantum
    *p;
  register ssize_t
    i,
    x;
  short
    *extrema[MaxDimension];
  ssize_t
    count,
    *histogram[MaxDimension],
    y;
  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* NOTE(review): sizeof(**histogram) (ssize_t) is used for the short
       extrema buffer too — over-allocates but is harmless; kept as-is. */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the buffers already allocated before failing. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and locate the optimal peak/valley extrema for each
    color component via the scale-space filter.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one candidate class per combination of peak regions.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        /* NOTE(review): this failure path leaks the histogram/extrema
           buffers and any clusters already linked — confirm acceptable. */
        if (cluster == (Cluster *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel joins the first cluster whose
    (margin-padded) RGB extents contain it.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: turn accumulated sums into means.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest surviving cluster as the object and the largest as the
    background; the threshold is the midpoint of their centers.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      /* NOTE(review): these scans start at head->next and never compare the
         final list element (loop tests cluster->next) — so head and the last
         cluster are skipped; looks like an upstream quirk, confirm. */
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    bin,
    y;

  /*
    Clear all 256 bins of each color component's histogram.
  */
  for (bin=0; bin < 256; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Walk the image row by row, bumping one bin per component per pixel.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
      p+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  /*
    A node with no children is a leaf: append it to the flat list.
  */
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  /*
    Recurse siblings before children so discovery order is preserved.
  */
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
/*
  MeanStability() assigns each node the arithmetic mean of its children's
  stability values; nodes without children keep 0.0.  Siblings are walked
  iteratively, children recursively.
*/
static void MeanStability(IntervalTree *node)
{
  for ( ; node != (IntervalTree *) NULL; node=node->sibling)
  {
    IntervalTree
      *child;

    double
      sum;

    ssize_t
      count;

    node->mean_stability=0.0;
    sum=0.0;
    count=0;
    for (child=node->child; child != (IntervalTree *) NULL;
         child=child->sibling)
    {
      sum+=child->stability;
      count++;
    }
    if (count != 0)
      node->mean_stability=sum/(double) count;
    MeanStability(node->child);
  }
}
/*
  Stability() sets each node's stability to the tau span between the node and
  its first child; leaves get 0.0.  Siblings are walked iteratively, children
  recursively.
*/
static void Stability(IntervalTree *node)
{
  for ( ; node != (IntervalTree *) NULL; node=node->sibling)
  {
    node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
      node->tau-(node->child)->tau;
    Stability(node->child);
  }
}
/*
  InitializeIntervalTree() builds an interval tree from the zero-crossing
  lists: the root covers the whole 0..255 histogram, and for each scale
  (finest last) every leaf interval is split at the zero crossings that fall
  strictly inside it.  Returns the root, or NULL when the work list or the
  root node cannot be allocated.

  Fix: the root allocation was previously unchecked (NULL dereference on
  exhaustion) and 'list' leaked on that path.
*/
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireMagickMemory(sizeof(*root));
  if (root == (IntervalTree *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return((IntervalTree *) NULL);
    }
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /*
              NOTE(review): the per-node allocations below are still
              unchecked, as in the original; checking them would require a
              partial-tree teardown (FreeNodes is declared later in this
              file).
            */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      if (left != head->left)
        {
          /*
            Close the final sub-interval up to the parent's right edge.
          */
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
/*
  ActiveNodes() collects the "active" nodes of the interval tree into 'list'.
  A node whose stability is at least the mean stability of its children is
  appended and its subtree is pruned (only siblings are examined further);
  otherwise the search descends into both siblings and children.  Append
  order matches the recursion order exactly.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
/*
  FreeNodes() releases an interval (sub)tree: each node's children are freed
  before the node itself; the sibling chain is walked iteratively.
*/
static void FreeNodes(IntervalTree *node)
{
  while (node != (IntervalTree *) NULL)
  {
    IntervalTree
      *sibling;

    sibling=node->sibling;
    FreeNodes(node->child);
    (void) RelinquishMagickMemory(node);
    node=sibling;
  }
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
const double min_tau,const double delta_tau,const double smooth_threshold,
short *extrema)
{
IntervalTree
**list,
*node,
*root;
MagickBooleanType
peak;
double
average_tau,
*derivative,
*second_derivative,
tau,
value;
register ssize_t
i,
x;
size_t
count,
number_crossings;
ssize_t
index,
j,
k,
number_nodes;
ZeroCrossing
*zero_crossing;
/*
Allocate interval tree.
*/
list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
sizeof(*list));
if (list == (IntervalTree **) NULL)
return(0.0);
/*
Allocate zero crossing list.
*/
count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
sizeof(*zero_crossing));
if (zero_crossing == (ZeroCrossing *) NULL)
return(0.0);
for (i=0; i < (ssize_t) count; i++)
zero_crossing[i].tau=(-1.0);
/*
Initialize zero crossing list.
*/
derivative=(double *) AcquireQuantumMemory(256,sizeof(*derivative));
second_derivative=(double *) AcquireQuantumMemory(256,
sizeof(*second_derivative));
if ((derivative == (double *) NULL) ||
(second_derivative == (double *) NULL))
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDerivatives");
i=0;
for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
{
zero_crossing[i].tau=tau;
ScaleSpace(histogram,tau,zero_crossing[i].histogram);
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
i++;
}
/*
Add an entry for the original histogram.
*/
zero_crossing[i].tau=0.0;
for (j=0; j <= 255; j++)
zero_crossing[i].histogram[j]=(double) histogram[j];
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
number_crossings=(size_t) i;
derivative=(double *) RelinquishMagickMemory(derivative);
second_derivative=(double *)
RelinquishMagickMemory(second_derivative);
/*
Ensure the scale-space fingerprints form lines in scale-space, not loops.
*/
ConsolidateCrossings(zero_crossing,number_crossings);
/*
Force endpoints to be included in the interval.
*/
for (i=0; i <= (ssize_t) number_crossings; i++)
{
for (j=0; j < 255; j++)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
for (j=255; j > 0; j--)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
}
/*
Initialize interval tree.
*/
root=InitializeIntervalTree(zero_crossing,number_crossings);
if (root == (IntervalTree *) NULL)
return(0.0);
/*
Find active nodes: stability is greater (or equal) to the mean stability of
its children.
*/
number_nodes=0;
ActiveNodes(list,&number_nodes,root->child);
/*
Initialize extrema.
*/
for (i=0; i <= 255; i++)
extrema[i]=0;
for (i=0; i < number_nodes; i++)
{
/*
Find this tau in zero crossings list.
*/
k=0;
node=list[i];
for (j=0; j <= (ssize_t) number_crossings; j++)
if (zero_crossing[j].tau == node->tau)
k=j;
/*
Find the value of the peak.
*/
peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
MagickFalse;
index=node->left;
value=zero_crossing[k].histogram[index];
for (x=node->left; x <= node->right; x++)
{
if (peak != MagickFalse)
{
if (zero_crossing[k].histogram[x] > value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
else
if (zero_crossing[k].histogram[x] < value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
for (x=node->left; x <= node->right; x++)
{
if (index == 0)
index=256;
if (peak != MagickFalse)
extrema[x]=(short) index;
else
extrema[x]=(short) (-index);
}
}
/*
Determine the average tau.
*/
average_tau=0.0;
for (i=0; i < number_nodes; i++)
average_tau+=list[i]->tau;
average_tau/=(double) number_nodes;
/*
Relinquish resources.
*/
FreeNodes(root);
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
%    o histogram: Specifies an array of ssize_t values representing the number
%      of pixels for each intensity of a particular color component.
%
*/
/*
  ScaleSpace() convolves the 256-bin histogram with a Gaussian of standard
  deviation tau and writes the smoothed result to scale_histogram.  The
  Gaussian lookup table is truncated once its entries drop below
  MagickEpsilon (the first sub-epsilon entry is kept, the rest stay zero).
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *kernel,
    sum;

  ssize_t
    u,
    x;

  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x < 256; x++)
    kernel[x]=0.0;
  x=0;
  while (x < 256)
  {
    kernel[x]=exp((double) beta*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
    x++;
  }
  for (x=0; x < 256; x++)
  {
    sum=0.0;
    for (u=0; u < 256; u++)
      sum+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SegmentImage() segments an image with the fuzzy C-means technique: it
  transforms the image to the requested colorspace, histograms each of the
  three channels, derives per-channel extrema via OptimalTau(), classifies the
  pixels, and restores the original colorspace.  Returns the status of the
  classification step.

  Fix: on allocation failure the cleanup loop started at i-1, leaking
  whichever of histogram[i]/extrema[i] had already been acquired at the
  failing index; the loop now starts at i (RelinquishMagickMemory() accepts
  NULL).
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
/*
  ZeroCrossHistogram() finds the zero crossings of the second derivative and
  marks them in 'crossings': -1 for a positive-to-negative transition, 1 for
  a negative-to-positive transition, 0 elsewhere.  Values within
  [-smooth_threshold,smooth_threshold) are flushed to zero first to suppress
  noise.

  Fix: the parity assignments were swapped (a negative sample recorded
  parity=1 and a positive sample parity=-1), which marked the interior of
  same-sign runs and missed the actual sign transitions.  'parity' now tracks
  the sign of the most recent non-zero sample, so a transition is detected
  even across intervening zero-valued samples.
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);  /* positive-to-negative crossing */
        parity=(-1);
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;  /* negative-to-positive crossing */
          parity=1;
        }
  }
}
|
computePDF.c | /**************
computePDF.c
Functions used to compute the PDFs for several dimensionalities.
Copyright (c) 2014, Unai Lopez-Novoa, Jon Saenz, Alexander Mendiburu
and Jose Miguel-Alonso (from Universidad del Pais Vasco/Euskal
Herriko Unibertsitatea)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Universidad del Pais Vasco/Euskal
Herriko Unibertsitatea nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************/
#include "computePDF.h"
#include "linalg.h"
//Volume of the unit d-ball for the kernel normalization: 2 (segment) for
//dim 1, pi (disc) for dim 2, 4*pi/3 (ball) for dim 3, and the generic
//unit_sphere_volume() for higher dimensionalities.
double volumeConstant(int dim)
{
	switch(dim)
	{
		case 1:
			return 2.;
		case 2:
			return acos(-1.);
		case 3:
			return acos(-1.)*4./3.;
		default:
			return unit_sphere_volume(dim);
	}
}
/**** Functions to calculate the PDF of a defined 2D space (box) for a given sample. ****/
//Compute the density in the bounding box of a sample - Function for 2D spaces
//Accumulate one sample's density over its 2D bounding box into the PDF grid.
//  pdf             - destination PDF structure (pdf->PDF is the flat grid)
//  PC              - the current sample expressed in PC space
//  lower           - lower corner of the sample's bounding box
//  tot_ev_per_dim  - number of evaluation points per dimension
//  gridpoint       - scratch: coordinates of the current grid point
//  dif_pos         - scratch: grid indices of the current point (serial path)
//  x0, dx          - grid origin and per-dimension spacing
//  h2              - squared bandwidth
//  cd              - volume constant for dim 2 (see volumeConstant)
//  eigenvectors    - transform from grid space into PC space
//  densValues/densPosition - 64-byte-aligned per-thread scratch (OpenMP path)
void compute2DBox_2D(PDFPtr pdf, double * PC, double * lower, int * tot_ev_per_dim, double * gridpoint, size_t * dif_pos,
	double * x0, double * dx, double h2, double cd, MAT * eigenvectors, double * restrict densValues, int * restrict densPosition)
{
	int u,v,l; //Loop variables (l is unused in this kernel)
	double temp; //Will contain the absolute distance value from gridpoint to sample.
	double PCdot[2] __attribute__((aligned(64)));

	for(gridpoint[0] = lower[0], u = 0; u < tot_ev_per_dim[0]; gridpoint[0] += dx[0], u++)
	{
		//Flat-index contribution of dimension 0; dimension 1 is added per point below
		int HalfPosition = (((gridpoint[0] - x0[0])/ dx[0]) * pdf->pdfcumsize[0]);
		//Compiler flag to inform about structure alignment
		__assume_aligned(densValues,64);
		__assume_aligned(densPosition,64);
#ifdef _OPENMP
#pragma simd private(PCdot,temp) assert
#endif
		for(v = 0; v < tot_ev_per_dim[1]; v++)
		{
			//Conversion to PC space
			PCdot[0] = (eigenvectors->me[0][0] * gridpoint[0]) + (eigenvectors->me[0][1] * (lower[1] + (dx[1] * v)));
			PCdot[1] = (eigenvectors->me[1][0] * gridpoint[0]) + (eigenvectors->me[1][1] * (lower[1] + (dx[1] * v)));
			//Absolute distance calculation, normalized by the squared bandwidth
			temp = (((PC[0] - PCdot[0]) * (PC[0] - PCdot[0])) + ((PC[1] - PCdot[1]) * (PC[1] - PCdot[1])) ) / h2;
			//If OpenMP version, store the density value in an auxiliary vector densValues, previous to storing in the final PDF structure
			//Vector densPosition will contain the position of the gridpoint in the final PDF structure
#ifdef _OPENMP
			//PDFposition
			densPosition[v] = HalfPosition + ((((lower[1] + (dx[1] * v)) - x0[1])/ dx[1]) * pdf->pdfcumsize[1]);
			//Kernel value 0.5/cd*(dim+2)*(1-t), masked to zero outside the unit ball
			densValues[v] = (0.5/cd*(2+2.)*(1.-temp)) * (fabs(temp)<1.);
			//If serial version, store the density value of the sample over the gridpoint in the PDF structure
#else
			gridpoint[1] = (lower[1] + (dx[1] * v));
			dif_pos[0] = (gridpoint[0] - x0[0])/ dx[0];
			dif_pos[1] = (gridpoint[1] - x0[1])/ dx[1];
			*PDFitem(pdf ,dif_pos, 2) += (0.5/cd*(2+2.)*(1.-temp)) * (fabs(temp)<1.) ;
#endif
		}
#ifdef _OPENMP
		//Flush the buffered densities into the shared PDF; atomic because other
		//threads may be accumulating into the same cells
		for(v = 0; v < tot_ev_per_dim[1]; v++)
#pragma omp atomic
			pdf->PDF[densPosition[v]] += densValues[v];
#endif
	}
}
//Accumulate one sample's density over one 2D layer of its 3D bounding box.
//The outer loop walks dimension 0, the inner (vectorized) loop dimension 1;
//dimension 2 is fixed by the caller in gridpoint[2].  Parameters are as in
//compute2DBox_2D, with cd the volume constant for dim 3.
//
//Fixes vs. the previous revision: the loops ran with '<=' although
//tot_ev_per_dim[] already counts the final evaluation point (the callers use
//rint(...)+1 and every sibling kernel — 1D, 2D, ND — iterates with strict
//'<'), so one grid point past the bounding box was evaluated per dimension
//and could index past the PDF grid and the densValues/densPosition scratch;
//'temp' was missing from the simd private clause (the 2D twin privatizes
//it), and the pragma is now guarded by _OPENMP for consistency.
void compute2DBox_3D(PDFPtr pdf, double * PC, double * lower, int * tot_ev_per_dim, double * gridpoint, size_t * dif_pos,
	double * x0,double * dx, double h2, double cd, MAT * eigenvectors, double * restrict densValues, int * restrict densPosition)
{
	int u,v; //Loop variables
	double temp; //Will contain the absolute distance value from gridpoint to sample.
	double PCdot[3] __attribute__((aligned(64)));

	//Compiler flag to inform about structure alignment
	__assume_aligned(densValues,64);
	__assume_aligned(densPosition,64);

	for(gridpoint[0] = lower[0], u = 0; u < tot_ev_per_dim[0]; gridpoint[0] += dx[0], u++)
	{
		//Flat-index contribution of dimensions 0 and 2; dimension 1 is added per point below
		int HalfPosition = (((gridpoint[0] - x0[0])/ dx[0]) * pdf->pdfcumsize[0]) + (((gridpoint[2] - x0[2])/ dx[2]) * pdf->pdfcumsize[2]);
#ifdef _OPENMP
#pragma simd private(PCdot,temp) assert
#endif
		for(v = 0; v < tot_ev_per_dim[1]; v++)
		{
			//Conversion to PC space
			PCdot[0] = (eigenvectors->me[0][0] * gridpoint[0]) + (eigenvectors->me[0][1] * (lower[1] + (dx[1] * v))) + (eigenvectors->me[0][2] * gridpoint[2]);
			PCdot[1] = (eigenvectors->me[1][0] * gridpoint[0]) + (eigenvectors->me[1][1] * (lower[1] + (dx[1] * v))) + (eigenvectors->me[1][2] * gridpoint[2]);
			PCdot[2] = (eigenvectors->me[2][0] * gridpoint[0]) + (eigenvectors->me[2][1] * (lower[1] + (dx[1] * v))) + (eigenvectors->me[2][2] * gridpoint[2]);
			//Absolute distance calculation, normalized by the squared bandwidth
			temp = (((PC[0] - PCdot[0]) * (PC[0] - PCdot[0])) + ((PC[1] - PCdot[1]) * (PC[1] - PCdot[1])) + ((PC[2] - PCdot[2]) * (PC[2] - PCdot[2]))) / h2;
			//If OpenMP version, store the density value in an auxiliary vector densValues, previous to storing in the final PDF structure
			//Vector densPosition will contain the position of the gridpoint in the final PDF structure
#ifdef _OPENMP
			//PDFposition
			densPosition[v] = HalfPosition + ((((lower[1] + (dx[1] * v)) - x0[1])/ dx[1]) * pdf->pdfcumsize[1]);
			//Kernel value 0.5/cd*(dim+2)*(1-t), masked to zero outside the unit ball
			densValues[v] = (0.5/cd*(3+2.)*(1.-temp)) * (fabs(temp)<1.);
			//If serial version, store the density value of the sample over the gridpoint in the PDF structure
#else
			gridpoint[1] = (lower[1] + (dx[1] * v));
			dif_pos[0] = (gridpoint[0] - x0[0])/ dx[0];
			dif_pos[1] = (gridpoint[1] - x0[1])/ dx[1];
			dif_pos[2] = (gridpoint[2] - x0[2])/ dx[2];
			*PDFitem(pdf ,dif_pos, 3) += (0.5/cd*(3+2.)*(1.-temp)) * (fabs(temp)<1.) ;
#endif
		}
#ifdef _OPENMP
		//Flush the buffered densities into the shared PDF; atomic because other
		//threads may be accumulating into the same cells
		for(v = 0; v < tot_ev_per_dim[1]; v++)
#pragma omp atomic
			pdf->PDF[densPosition[v]] += densValues[v];
#endif
	}
}
//Compute the density in the bounding box of a sample - Generic function, used for spaces of dimensionality higher than 3
//Compute the density in the bounding box of a sample - Generic function, used for spaces of dimensionality higher than 3
//The outer loop walks dimension 0 and the inner loops dimension 1; all
//dimensions >= 2 are fixed by the caller in gridpoint[]/dif_pos[].  The
//OpenMP path unrolls the per-gridpoint work into flat scratch vectors
//(gridpoint_vec, PCdot_vec, temp_vec, each tot_ev_per_dim[1]*dim wide or
//tot_ev_per_dim[1] wide) so the v-loops vectorize.
//NOTE(review): HalfTemp is declared but never used.
void compute2DBox_ND(PDFPtr pdf, double * PC, double * lower, int * tot_ev_per_dim, double * gridpoint, size_t * dif_pos, double * x0,
	double * dx, int dim, double h2, double cd, MAT * eigenvectors, double * restrict densValues, int * restrict densPosition,
	double * restrict PCdot_vec, double * restrict temp_vec, double * restrict gridpoint_vec)
{
	int u,v,d,l; //Loop variables
	int HalfPosition;
	int dimGreaterThanTwoPosition = 0;
	double HalfTemp = 0;
#ifdef _OPENMP //Initializations for vector implementation
	//Flat-index contribution of the fixed dimensions (>= 2); constant for this call
#pragma simd reduction(+:dimGreaterThanTwoPosition) assert
	for(d = 2; d < dim; d++)
		dimGreaterThanTwoPosition += (dif_pos[d] * pdf->pdfcumsize[d]);
	//Replicate the fixed coordinates into every row of the scratch grid
	for(v = 0; v < tot_ev_per_dim[1]; v++)
		for(d = 2; d < dim; d++)
			gridpoint_vec[v * dim + d] = gridpoint[d];
#endif
	for(gridpoint[0] = lower[0], u = 0; u < tot_ev_per_dim[0]; gridpoint[0] += dx[0], u++)
	{
		//Compiler flag to inform about structure alignment
		__assume_aligned(densValues,64);
		__assume_aligned(densPosition,64);
#ifdef _OPENMP //Vector friendly implementation
		HalfPosition = (((gridpoint[0] - x0[0])/ dx[0]) * pdf->pdfcumsize[0]) + dimGreaterThanTwoPosition;
		//Broadcast the dimension-0 coordinate and reset the accumulators
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			gridpoint_vec[v * dim + 0] = gridpoint[0];
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			temp_vec[v] = 0;
		//Dimension-1 coordinate varies across the inner loop
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			gridpoint_vec[v * dim + 1] = (lower[1] + (dx[1] * v));
		for(v = 0; v < tot_ev_per_dim[1] * dim; v++)
			PCdot_vec[v] = 0;
		//Conversion to PC space: per-gridpoint matrix-vector product with the eigenvector matrix
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			for(d = 0; d < dim; d++)
#pragma simd reduction(+:PCdot_vec[v * dim + d]) assert
				for(l = 0; l < dim; l++)
					PCdot_vec[v * dim + d] += (eigenvectors->me[d][l] * gridpoint_vec[v * dim + l]);
		//Squared distance from the sample, normalized by the squared bandwidth
		for(v = 0; v < tot_ev_per_dim[1]; v++)
#pragma simd reduction(+:temp_vec[v]) assert
			for(d = 0; d < dim; d++)
				temp_vec[v] += ((PC[d] - PCdot_vec[v * dim + d]) * (PC[d] - PCdot_vec[v * dim + d]));
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			temp_vec[v] /= h2;
		//Positions and kernel values 0.5/cd*(dim+2)*(1-t), masked to the unit ball
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			densPosition[v] = HalfPosition + ((((lower[1] + (dx[1] * v)) - x0[1])/ dx[1]) * pdf->pdfcumsize[1]);
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			densValues[v] = (0.5/cd*(dim + 2.)*(1.-temp_vec[v])) * (fabs(temp_vec[v])<1.);
		//Flush into the shared PDF; atomic because other threads may hit the same cells
		for(v = 0; v < tot_ev_per_dim[1]; v++)
#pragma omp atomic
			pdf->PDF[densPosition[v]] += densValues[v];
#else // Serial implementation
		double temp;
		dif_pos[0] = (gridpoint[0] - x0[0])/ dx[0];
		for(v = 0; v < tot_ev_per_dim[1]; v++)
		{
			gridpoint[1] = (lower[1] + (dx[1] * v));
			//Conversion to PC space
			for(d = 0; d < dim; d++)
				PCdot_vec[d] = 0;
			for(d = 0; d < dim; d++)
#pragma simd reduction(+:PCdot_vec[d]) assert
				for(l = 0; l < dim; l++)
					PCdot_vec[d] += (eigenvectors->me[d][l] * gridpoint[l]);
			//Absolute distance calculation
			temp = 0;
#pragma simd reduction(+:temp) assert
			for(d = 0; d < dim; d++)
				temp += ((PC[d] - PCdot_vec[d]) * (PC[d] - PCdot_vec[d]));
			temp /= h2;
			dif_pos[1] = (gridpoint[1] - x0[1])/ dx[1];
			*PDFitem(pdf ,dif_pos, dim) += (0.5/cd*(dim + 2.)*(1.-temp)) * (fabs(temp)<1.) ;
		}
#endif
	}
}
/**** Functions to calculate PDF, called from main ****/
//Compute the PDF of a one-dimensional grid space
//Compute the PDF of a one-dimensional grid space: for every sample, evaluate
//the kernel over the sample's bounding interval and accumulate into pdf->PDF,
//then scale by k, renormalize by the numeric integral, and report the total.
//  mpdf        - sample set (original and PC-space coordinates)
//  pdf         - output PDF grid
//  Sm1         - inverse covariance matrix (unused in the 1D path)
//  h, detSm1   - bandwidth and determinant of the inverse covariance
//  x0, x1, dx  - grid lower corner, upper corner, and spacing
//  bounds      - per-dimension half-width of a sample's influence interval
//  eigenvectors - transform from grid space into PC space
void computePDF1D(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h , double detSm1 , double *x0,
	double *x1, double *dx, double *bounds, MAT *eigenvectors )
{
	int i,j,u; //Loop variables
	int dim = 1; //Dimensions of grid space
	double cd = volumeConstant(dim); //Volume constants to calculate kernel values
	double h2=h*h; //Squared bandwidth value
	double *PC; // Current sample (PC space)
	double theintegral = 0.0;
	double total_vol = 0.0;
	double * sample;
	double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
	double PCdot;
	//Variables to calculate coordinates and number of gridpoints of bounding box
	int steps;
	double upper, lower, gridpoint; //NOTE(review): gridpoint is unused in the 1D path
	int tot_ev;
	size_t dif_pos[1];
	double abs_bound,temp;
	//Auxiliary vectors for OpenMP version
	double * densValues;
	int * densPosition;

#pragma omp parallel default(none) \
	shared(stdout,mpdf,pdf,dim,x0,x1,dx,theintegral,total_vol,bounds,eigenvectors,cd,h2,k) \
	private(i,j,u,sample,PC,lower,upper,steps,abs_bound,tot_ev,dif_pos,gridpoint,PCdot,densValues,densPosition,temp)
	{
#ifdef _OPENMP
	//Scratch sized to the largest possible bounding interval (in grid points)
	int dim0_max_size = ((ceil(bounds[0] / dx[0]) * 2) + 3);
	densValues = (double *)_mm_malloc(sizeof(double) * dim0_max_size,64); //Vector to hold density values of each sample-gridpoint combination
	densPosition = (int *)_mm_malloc(sizeof(int) * dim0_max_size,64); //Vector to hold the positions of densValues values in the PDF structure
#endif

	//Initialize PDF structure to 0s
#pragma omp for
	for(i = 0; i < pdf->total_size; i++)
		pdf->PDF[i] = 0.0f;

	//Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
#pragma omp for
	for(i=0;i<mpdf->current;i++)
	{
		sample = MPDFPosition(mpdf,i); //Get current sample
		PC = MPDFPCPosition(mpdf,i); //Get current sample (scaled as PC)

		//For each sample, calculate its boundaries, clamped to the grid
		//Lower corner
		abs_bound = sample[0] - bounds[0];
		if (x0[0] > abs_bound)
			lower = x0[0];
		else
		{
			steps = floor((abs_bound - x0[0]) / dx[0]);
			lower = x0[0] + (steps * dx[0]);
		}
		//Upper corner
		abs_bound = sample[0] + bounds[0];
		if (x1[0] < abs_bound)
			upper = x1[0];
		else
		{
			steps = ceil((abs_bound - x0[0]) / dx[0]);
			upper = x0[0] + (steps * dx[0]);
		}
		//Calculate number of eval points per dimension
		tot_ev = rint((upper - lower)/dx[0]) + 1;

		//Calculate the PDF of the defined 1D space
#ifdef _OPENMP
#pragma simd private(PCdot,temp) assert
#endif
		for(u = 0; u < tot_ev; u++)
		{
			PCdot = (eigenvectors->me[0][0] * (lower + (dx[0] * u)));
			//Absolute distance calculation, normalized by the squared bandwidth
			temp = ((PC[0] - PCdot) * (PC[0] - PCdot)) / h2;
			//If OpenMP version, store the density value in an auxiliary vector densValues, previous to storing in the final PDF structure
			//Vector densPosition will contain the position of the gridpoint in the final PDF structure
#ifdef _OPENMP
			//PDFposition
			densPosition[u] = (((lower + (dx[0] * u)) - x0[0])/ dx[0]) * pdf->pdfcumsize[0];
			densValues[u] = (0.5/cd*(1+2.)*(1.-temp)) * (fabs(temp)<1.);
			//If serial version, store the density value of the sample over the gridpoint in the PDF structure
#else
			dif_pos[0] = ((lower + (dx[0] * u)) - x0[0])/ dx[0];
			*PDFitem(pdf ,dif_pos, 1) += (0.5/cd*(1+2.)*(1.-temp)) * (fabs(temp)<1.) ;
#endif
		}
#ifdef _OPENMP
		//Flush the buffered densities into the shared PDF (atomic: cells are shared between threads)
		for(u = 0; u < tot_ev; u++)
#pragma omp atomic
			pdf->PDF[densPosition[u]] += densValues[u];
#endif
	}

#ifdef _OPENMP
	_mm_free(densValues);
	_mm_free(densPosition);
#endif

	//Apply k constant to PDF
#pragma omp for
	for(i=0; i < pdf->total_size; i++)
		pdf->PDF[i] = pdf->PDF[i] * k;

	//Calculate integral of PDF
#pragma omp for reduction(+:theintegral)
	for(i=0; i < pdf->total_size; i++)
		theintegral += pdf->PDF[i];
	//Single thread scales the integral; the implicit barrier publishes it
#pragma omp single
	theintegral = theintegral * dx[0];

	//Renormalize PDF using integral
#pragma omp for
	for(i=0; i < pdf->total_size; i++)
		pdf->PDF[i] = pdf->PDF[i]/theintegral;

	//Calculate total volume of renormalized PDF
#pragma omp for reduction(+:total_vol)
	for(i=0; i < pdf->total_size; i++)
		total_vol += pdf->PDF[i];

	}//End of parallel OpenMP Region

	printf("Total integrated PDF: %g. The integral: %f\n",total_vol*dx[0],theintegral);
}
//Compute the PDF of a 2D grid space
//Compute the PDF of a 2D grid space: for every sample, determine its
//bounding box (clamped to the grid), accumulate its kernel contribution via
//compute2DBox_2D, then scale by k, renormalize by the numeric integral, and
//report the total.  Parameters are as in computePDF1D.
void computePDF2D(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h , double detSm1 , double *x0,
	double *x1, double *dx, double *bounds, MAT *eigenvectors )
{
	int i,j; //Loop variables
	int dim = 2; //Dimensions of grid space
	double cd = volumeConstant(dim); //Volume constants to calculate kernel values
	double h2=h*h; //Squared bandwidth value
	double *PC; // Current sample (PC space)
	double theintegral = 0.0;
	double total_vol = 0.0;
	double total_dx = dx[0] * dx[1]; //Area of one grid cell
	double * sample;
	double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
	double * PCdot;
	//Variables to calculate coordinates and number of gridpoints of bounding box
	int steps;
	double upper, lower[2], gridpoint[2];
	int tot_ev_per_dim[2];
	size_t dif_pos[2];
	double abs_bound;
	//Auxiliary vectors for OpenMP version
	double * densValues;
	int * densPosition;

#pragma omp parallel default(none) \
	shared(mpdf,pdf,dim,x0,x1,dx,total_dx,theintegral,total_vol,bounds,eigenvectors,cd,h2,k) \
	private(i,j,sample,PC,lower,upper,steps,abs_bound,tot_ev_per_dim,dif_pos,gridpoint,PCdot,densValues,densPosition)
	{
#ifdef _OPENMP
	//Scratch sized to the largest possible bounding box extent along dim 1
	int dim1_max_size = ((ceil(bounds[1] / dx[1]) * 2) + 3);
	densValues = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64); //Vector to hold density values of each sample-gridpoint combination
	densPosition = (int *)_mm_malloc(sizeof(int) * dim1_max_size,64); //Vector to hold the positions of densValues values in the PDF structure
#endif

	//Initialize PDF structure to 0s
#pragma omp for
	for(i = 0; i < pdf->total_size; i++)
		pdf->PDF[i] = 0.0f;

	//Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
#pragma omp for
	for(i=0;i<mpdf->current;i++)
	{
		sample = MPDFPosition(mpdf,i); //Get current sample
		PC = MPDFPCPosition(mpdf,i); //Get current sample (scaled as PC)

		//For each sample, calculate its bounding box,
		//expressed as coordinates of lower corner and number of gridpoints per dimensions
		for(j = 0; j < 2; j++)
		{
			//Lower corner (clamped to the grid origin)
			abs_bound = sample[j] - bounds[j];
			if (x0[j] > abs_bound)
				lower[j] = x0[j];
			else
			{
				steps = floor((abs_bound - x0[j]) / dx[j]);
				lower[j] = x0[j] + (steps * dx[j]);
			}
			//Upper corner (clamped to the grid upper bound)
			abs_bound = sample[j] + bounds[j];
			if (x1[j] < abs_bound)
				upper = x1[j];
			else
			{
				steps = ceil((abs_bound - x0[j]) / dx[j]);
				upper = x0[j] + (steps * dx[j]);
			}
			//Calculate number of eval points per dimension (inclusive of both ends)
			tot_ev_per_dim[j] = rint((upper - lower[j])/dx[j]) + 1;
		}
		//Calculate the PDF of the defined 2D box
		compute2DBox_2D(pdf,PC,lower,tot_ev_per_dim,gridpoint,dif_pos,x0,dx,h2,cd,eigenvectors,densValues,densPosition);
	}

#ifdef _OPENMP
	_mm_free(densValues);
	_mm_free(densPosition);
#endif

	//Apply k constant to PDF
#pragma omp for
	for(i=0; i < pdf->total_size; i++)
		pdf->PDF[i] = pdf->PDF[i] * k;

	//Calculate integral of PDF
#pragma omp for reduction(+:theintegral)
	for(i=0; i < pdf->total_size; i++)
		theintegral += pdf->PDF[i];
	//Single thread scales the integral; the implicit barrier publishes it
#pragma omp single
	theintegral = theintegral * total_dx;

	//Renormalize PDF using integral
#pragma omp for
	for(i=0; i < pdf->total_size; i++)
		pdf->PDF[i] = pdf->PDF[i]/theintegral;

	//Calculate total volume of renormalized PDF
#pragma omp for reduction(+:total_vol)
	for(i=0; i < pdf->total_size; i++)
		total_vol += pdf->PDF[i];

	}//End of parallel OpenMP Region

	printf("Total integrated PDF: %g. The integral: %f\n",total_vol*dx[0]*dx[1],theintegral);
}
#define DEBUG_TEMPS 1
#undef DEBUG_TEMPS
//Compute the PDF over a 3-dimensional grid (fixed dim == 3 specialization of computePDFND:
//the Z axis is scanned explicitly and each Z layer is reduced to an optimal rotated 2D box).
void computePDF3D(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h ,
double detSm1 , double *x0, double *x1, double *dx,
double *bounds, MAT *eigenvectors)
{
int dim = 3;
int i,j,l,u,w; //Loop variables (l and w are declared for the omp private clause but unused here)
double cd = volumeConstant(dim); //Volume constant
double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
double h2=h*h; //Square of bandwith value
double *PC; // Current sample (PC space)
double total_vol=0.0; //Sum of the renormalized PDF, reported at the end as a sanity check
double theintegral=0.0; //Integral of the raw (k-scaled) PDF, used to renormalize
double * sample; //Current sample
double * PCdot; //Unused in this 3D specialization (kept for symmetry with computePDFND)
//Variables to calculate the bounding box of a sample
double lower[3];
double upper;
double gridpoint[3];
int tot_ev_per_dim[3];
size_t dif_pos[3];
int total_ev;
int steps;
double abs_bound; //Absolute bound per sample and dimension, given by ellipsoid shape
//Calculate acumulated volume for the grid space (product of the grid steps)
double total_dx = 1.0;
for (i = 0; i < dim; i++)
total_dx *= dx[i];
//Variables to perform the calculation of the 2D layering
double A,B,C,F,Z,theta,cosTheta,sinTheta,X2,Y2,X,Y,XY,termY2,valor,termX2,upy,rightx,upx_rot,upy_rot,rightx_rot,righty_rot;
double bound[2],box_center[2],box_min[2],box_max[2],box_steps[2],box_upper[2];
//Calculate partial equations for the 2D layering:
//rotate the first two dimensions by theta so the 2D cross-section ellipse becomes axis-aligned
A = Sm1->me[0][0];
B = 2 * Sm1->me[0][1];
C = Sm1->me[1][1];
theta = atan(B/(A-C))/2;
cosTheta = cos(theta);
sinTheta = sin(theta);
X2 = Sm1->me[0][0]*cosTheta*cosTheta + 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*sinTheta*sinTheta;
XY = -2*Sm1->me[0][0]*cosTheta*sinTheta + 2*Sm1->me[0][1]*cosTheta*cosTheta - 2*Sm1->me[0][1]*sinTheta*sinTheta + 2*Sm1->me[1][1]*cosTheta*sinTheta; //NOTE(review): XY is computed but never read in this function
Y2 = Sm1->me[0][0]*sinTheta*sinTheta - 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*cosTheta*cosTheta;
//Aux vector for OpenMP version (per-thread scratch handed to compute2DBox_3D)
double * densValues;
int * densPosition;
double * temp_vec; //temp_vec/PCdot_vec/gridpoint_vec are never allocated or used in this 3D variant
double * PCdot_vec;
double * gridpoint_vec;
//Beginning of OpenMP parallel region
#pragma omp parallel default(none)\
shared(stdout,theintegral,total_vol,total_dx,k,mpdf,pdf,cd,dim,bounds,x0,x1,dx,Sm1,cosTheta,sinTheta,eigenvectors,X2,XY,Y2,h2,h) \
private(i,j,l,u,w,sample,PC,gridpoint,total_ev,abs_bound,lower,box_upper,tot_ev_per_dim,box_steps,F,X,Y,Z,termX2,termY2,upy,rightx,upx_rot,upy_rot, \
valor,rightx_rot,righty_rot,bound,box_center,box_min,box_max,PCdot,dif_pos,steps,upper,densValues,densPosition,temp_vec,gridpoint_vec,PCdot_vec)
{
#ifdef _OPENMP
//Per-thread scratch sized for the worst-case number of grid points along dimension 1
int dim1_max_size = ((ceil(bounds[1] / dx[1]) * 2) + 3);
densValues = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64); //NOTE(review): allocation result is not checked
densPosition = (int *)_mm_malloc(sizeof(int) * dim1_max_size,64);
#endif
//NOTE(review): in a non-OpenMP build densValues/densPosition stay uninitialized yet are
//passed to compute2DBox_3D below -- presumably that path ignores them; confirm.
//Initialize PDF structure to 0s
#pragma omp for
for(i = 0; i < pdf->total_size; i++)
pdf->PDF[i] = 0.0f;
//Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
#pragma omp for
for(i=0;i<mpdf->current;i++)
{
sample = MPDFPosition(mpdf,i); //Get current sample
PC = MPDFPCPosition(mpdf,i); //X is the current sample (scaled as PC)
//Calculate boundaries for Z axis, clamped to the grid extent [x0[2], x1[2]]
//Lower corner
abs_bound = sample[2] - bounds[2];
if (x0[2] > abs_bound)
lower[2] = x0[2];
else
{
steps = floor((abs_bound - x0[2]) / dx[2]);
lower[2] = x0[2] + (steps * dx[2]);
}
//Upper corner
abs_bound = sample[2] + bounds[2];
if (x1[2] < abs_bound)
upper = x1[2];
else
{
steps = ceil((abs_bound - x0[2]) / dx[2]);
upper = x0[2] + (steps * dx[2]);
}
//Calculate number of grid points per dimension
total_ev = rint((upper - lower[2])/dx[2]) + 1;
//For each gridpoint in dimensions 3 to N
for(j = 0; j < total_ev; j++)
{
//Calculate location of grid point
gridpoint[2] = lower[2] + (dx[2] * j);
dif_pos[2] = (gridpoint[2] - x0[2])/ dx[2];
/* This code calculates, a 2D plane formed by the first two dimensions of the space, the optimal
* box inside the initial bounding box */
Z = gridpoint[2] - sample[2];
//X,Y, along with X2,XY,Y2 form the equation of the 2D rotated plane
F = Sm1->me[2][2] * Z * Z - 1;
X = 2*Sm1->me[0][2]*Z*cosTheta + 2*Sm1->me[1][2]*Z*sinTheta;
Y = -2*Sm1->me[0][2]*Z*sinTheta + 2*Sm1->me[1][2]*Z*cosTheta;
//Calculate displacements and obtain formula (x-xo)^2 / a^2 + % (y-yo)^2/b^2 = 1
termX2 = (X/X2)/2;
termY2 = (Y/Y2)/2;
valor = -F + termX2*termX2*X2 + termY2*termY2*Y2;
//Calculate new rotated bounding box. UP and RIGHT are the corners of the new bounding box
upy = sqrt(1/(Y2/valor)) * h;
rightx = sqrt(1/(X2/valor)) * h;
upx_rot = 0 * cosTheta + upy * sinTheta;
upy_rot = -0 * sinTheta + upy * cosTheta;
rightx_rot = rightx * cosTheta + 0 * sinTheta;
righty_rot = -rightx * sinTheta + 0 * cosTheta;
//Calculate original displacement (rotated ellipse)
box_center[0] = termX2*cosTheta-termY2*sinTheta;
box_center[1] = termX2*sinTheta+termY2*cosTheta;
bound[0] = sqrt(upx_rot*upx_rot+rightx_rot*rightx_rot);
bound[1] = sqrt(upy_rot*upy_rot+righty_rot*righty_rot);
//Calculate lower and upper bound of new BoundingBox (clamped to the grid in each 2D dimension)
for(u = 0; u < 2; u++)
{
box_min[u] = (sample[u] - box_center[u]) - bound[u];
box_steps[u] = floor((box_min[u] - x0[u]) / dx[u]);
lower[u] = (x0[u] > box_min[u])?(x0[u]):(x0[u] + (box_steps[u] * dx[u]));
box_max[u] = (sample[u] - box_center[u]) + bound[u];
box_steps[u] = ceil((box_max[u] - x0[u]) / dx[u]);
box_upper[u] = (x1[u] < box_max[u])?(x1[u]):(x0[u] + (box_steps[u] * dx[u]));
tot_ev_per_dim[u] = rint((box_upper[u] - lower[u])/dx[u]);
}
//Calculate the PDF of the defined 2D box
compute2DBox_3D(pdf,PC,lower,tot_ev_per_dim,gridpoint,dif_pos,x0,dx,h2,cd,eigenvectors,densValues,densPosition);
}//End of "per gridpoint" for
} //End of "per sample" for
#ifdef _OPENMP
_mm_free(densValues);
_mm_free(densPosition);
#endif
//Apply k constant to PDF
#pragma omp for
for(i=0; i < pdf->total_size; i++)
pdf->PDF[i] = pdf->PDF[i] * k;
//Calculate integral of PDF
#pragma omp for reduction(+:theintegral)
for(i=0; i < pdf->total_size; i++)
theintegral += pdf->PDF[i];
#pragma omp single
theintegral = theintegral * total_dx;
//Renormalize PDF using integral
#pragma omp for
for(i=0; i < pdf->total_size; i++)
pdf->PDF[i] = pdf->PDF[i]/theintegral;
//Calculate total volume of renormalized PDF
#pragma omp for reduction(+:total_vol)
for(i=0; i < pdf->total_size; i++)
total_vol += pdf->PDF[i];
}//End of parallel OpenMP Region
printf("Total integrated PDF: %g. The integral: %f\n",total_vol*total_dx,theintegral);
}
//Compute the PDF of grid spaces of dimension 3 or higher (general N-dimensional version;
//dimensions >= 2 are enumerated with a mixed-radix index, each slice solved as a rotated 2D box)
void computePDFND(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h ,
double detSm1 , double *x0, double *x1, double *dx,
double *bounds, MAT *eigenvectors, int dim)
{
int i,j,l,u,w; //Loop variables
double cd = volumeConstant(dim); //Volume constant
double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
double h2=h*h; //Square of bandwith value
double *PC; // Current sample (PC space)
double total_vol=0.0; //Sum of the renormalized PDF, reported at the end as a sanity check
double theintegral=0.0; //Integral of the raw (k-scaled) PDF, used to renormalize
double * sample; //Current sample
double * PCdot;
//Variables to calculate the bounding box of a sample (allocated per thread; size dim)
double * lower;
double upper;
double * gridpoint;
int * tot_ev_per_dim;
size_t * dif_pos;
int total_ev;
int steps;
double abs_bound; //Absolute bound per sample and dimension, given by ellipsoid shape
//Calculate acumulated volume for the grid space (product of the grid steps)
double total_dx = 1.0;
for (i = 0; i < dim; i++)
total_dx *= dx[i];
//Variables to perform the calculation of the 2D layering
double A,B,C,F,Z,theta,cosTheta,sinTheta,X2,Y2,X,Y,XY,termY2,valor,termX2,upy,rightx,upx_rot,upy_rot,rightx_rot,righty_rot;
double bound[2],box_center[2],box_min[2],box_max[2],box_steps[2],box_upper[2];
//Calculate partial equations for the 2D layering:
//rotate the first two dimensions by theta so the 2D cross-section ellipse becomes axis-aligned
A = Sm1->me[0][0];
B = 2 * Sm1->me[0][1];
C = Sm1->me[1][1];
theta = atan(B/(A-C))/2;
cosTheta = cos(theta);
sinTheta = sin(theta);
X2 = Sm1->me[0][0]*cosTheta*cosTheta + 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*sinTheta*sinTheta;
XY = -2*Sm1->me[0][0]*cosTheta*sinTheta + 2*Sm1->me[0][1]*cosTheta*cosTheta - 2*Sm1->me[0][1]*sinTheta*sinTheta + 2*Sm1->me[1][1]*cosTheta*sinTheta; //NOTE(review): XY is computed but never read in this function
Y2 = Sm1->me[0][0]*sinTheta*sinTheta - 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*cosTheta*cosTheta;
//Aux vector for OpenMP version (per-thread scratch handed to compute2DBox_ND)
double * densValues;
int * densPosition;
double * temp_vec;
double * PCdot_vec;
double * gridpoint_vec;
//Beginning of OpenMP parallel region
#pragma omp parallel default(none)\
shared(stdout,theintegral,total_vol,total_dx,k,mpdf,pdf,cd,dim,bounds,x0,x1,dx,Sm1,cosTheta,sinTheta,eigenvectors,X2,XY,Y2,h2,h) \
private(i,j,l,u,w,sample,PC,gridpoint,total_ev,abs_bound,lower,box_upper,tot_ev_per_dim,box_steps,F,X,Y,Z,termX2,termY2,upy,rightx,upx_rot,upy_rot, \
valor,rightx_rot,righty_rot,bound,box_center,box_min,box_max,PCdot,dif_pos,steps,upper,densValues,densPosition,temp_vec,gridpoint_vec,PCdot_vec)
{
//Allocate variables to calculate the bounding box of a sample
//NOTE(review): none of the allocation results below are checked before use
lower = (double *)malloc(sizeof(double) * dim);
gridpoint = (double *)malloc(sizeof(double) * dim);
tot_ev_per_dim = (int *)malloc(sizeof(int) * dim);
dif_pos = (size_t *)malloc(sizeof(size_t) * dim);
#ifdef _OPENMP
//Per-thread scratch sized for the worst-case number of grid points along dimension 1
int dim1_max_size = ((ceil(bounds[1] / dx[1]) * 2) + 3);
densValues = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64);
densPosition = (int *)_mm_malloc(sizeof(int) * dim1_max_size,64);
temp_vec = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64);
gridpoint_vec = (double *)_mm_malloc(sizeof(double) * dim1_max_size * dim,64);
PCdot_vec = (double *)_mm_malloc(sizeof(double) * dim1_max_size * dim,64);
#else
//NOTE(review): in this branch densValues/densPosition/temp_vec/gridpoint_vec stay
//uninitialized yet are passed to compute2DBox_ND -- presumably unused there; confirm.
PCdot_vec = (double *)malloc(sizeof(double) * dim);
#endif
//Initialize PDF structure to 0s
#pragma omp for
for(i = 0; i < pdf->total_size; i++)
pdf->PDF[i] = 0.0f;
//Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
#pragma omp for
for(i=0;i<mpdf->current;i++)
{
sample = MPDFPosition(mpdf,i); //Get current sample
PC = MPDFPCPosition(mpdf,i); //X is the current sample (scaled as PC)
//For each sample, calculate its bounding box,
//expressed as coordinates of lower corner and number of gridpoints per dimensions
total_ev = 1;
for(j = 2; j < dim; j++)
{
//Lower corner (clamped to the grid extent [x0[j], x1[j]])
abs_bound = sample[j] - bounds[j];
if (x0[j] > abs_bound)
lower[j] = x0[j];
else
{
steps = floor((abs_bound - x0[j]) / dx[j]);
lower[j] = x0[j] + (steps * dx[j]);
}
//Upper corner
abs_bound = sample[j] + bounds[j];
if (x1[j] < abs_bound)
upper = x1[j];
else
{
steps = ceil((abs_bound - x0[j]) / dx[j]);
upper = x0[j] + (steps * dx[j]);
}
//Calculate number of grid points per dimension
tot_ev_per_dim[j] = rint((upper - lower[j])/dx[j]) + 1;
total_ev *= tot_ev_per_dim[j] ;
}
//For each gridpoint in dimensions 3 to N
for(j = 0; j < total_ev; j++)
{
//Calculate location of grid point: decode the flat index j into per-dimension
//coordinates (mixed-radix decomposition over tot_ev_per_dim[2..dim-1])
int divisor;
int eval_point = j;
for(u = 2; u < dim-1; u++)
{
divisor = 1;
for(w = u+1; w < dim; w++)
divisor *= tot_ev_per_dim[w];
gridpoint[u] = lower[u] + (dx[u] * (eval_point / divisor));
eval_point = eval_point % divisor;
}
gridpoint[dim-1] = lower[dim-1] + (dx[dim-1] * eval_point); //Last case
//Fill structure with gridpoint position
for(l = 2; l < dim; l++)
dif_pos[l] = (gridpoint[l] - x0[l])/ dx[l];
/* This code calculates, a 2D plane formed by the first two dimensions of the space, the optimal
* box inside the initial bounding box */
Z = gridpoint[2] - sample[2];
//X,Y, along with X2,XY,Y2 form the equation of the 2D rotated plane
F = Sm1->me[2][2] * Z * Z - 1;
X = 2*Sm1->me[0][2]*Z*cosTheta + 2*Sm1->me[1][2]*Z*sinTheta;
Y = -2*Sm1->me[0][2]*Z*sinTheta + 2*Sm1->me[1][2]*Z*cosTheta;
//Calculate displacements and obtain formula (x-xo)^2 / a^2 + % (y-yo)^2/b^2 = 1
termX2 = (X/X2)/2;
termY2 = (Y/Y2)/2;
valor = -F + termX2*termX2*X2 + termY2*termY2*Y2;
//Calculate new rotated bounding box. UP and RIGHT are the corners of the new bounding box
upy = sqrt(1/(Y2/valor)) * h;
rightx = sqrt(1/(X2/valor)) * h;
upx_rot = 0 * cosTheta + upy * sinTheta;
upy_rot = -0 * sinTheta + upy * cosTheta;
rightx_rot = rightx * cosTheta + 0 * sinTheta;
righty_rot = -rightx * sinTheta + 0 * cosTheta;
//Calculate original displacement (rotated ellipse)
box_center[0] = termX2*cosTheta-termY2*sinTheta;
box_center[1] = termX2*sinTheta+termY2*cosTheta;
bound[0] = sqrt(upx_rot*upx_rot+rightx_rot*rightx_rot);
bound[1] = sqrt(upy_rot*upy_rot+righty_rot*righty_rot);
//Calculate lower and upper bound of new BoundingBox (clamped to the grid in each 2D dimension)
for(u = 0; u < 2; u++)
{
box_min[u] = (sample[u] - box_center[u]) - bound[u];
box_steps[u] = floor((box_min[u] - x0[u]) / dx[u]);
lower[u] = (x0[u] > box_min[u])?(x0[u]):(x0[u] + (box_steps[u] * dx[u]));
box_max[u] = (sample[u] - box_center[u]) + bound[u];
box_steps[u] = ceil((box_max[u] - x0[u]) / dx[u]);
box_upper[u] = (x1[u] < box_max[u])?(x1[u]):(x0[u] + (box_steps[u] * dx[u]));
tot_ev_per_dim[u] = rint((box_upper[u] - lower[u])/dx[u]);
}
//Calculate the PDF of the defined 2D box
compute2DBox_ND(pdf,PC,lower,tot_ev_per_dim,gridpoint,dif_pos,x0,dx,dim,h2,cd,eigenvectors,densValues,densPosition,PCdot_vec,temp_vec,gridpoint_vec);
}//End of "per gridpoint" for
} //End of "per sample" for
//Delete memory structures created by threads
free(lower);
free(tot_ev_per_dim);
free(dif_pos);
free(gridpoint);
#ifdef _OPENMP
_mm_free(densValues);
_mm_free(densPosition);
_mm_free(PCdot_vec);
_mm_free(temp_vec);
_mm_free(gridpoint_vec);
#else
free(PCdot_vec);
#endif
//Apply k constant to PDF
#pragma omp for
for(i=0; i < pdf->total_size; i++)
pdf->PDF[i] = pdf->PDF[i] * k;
//Calculate integral of PDF
#pragma omp for reduction(+:theintegral)
for(i=0; i < pdf->total_size; i++)
theintegral += pdf->PDF[i];
#pragma omp single
theintegral = theintegral * total_dx;
//Renormalize PDF using integral
#pragma omp for
for(i=0; i < pdf->total_size; i++)
pdf->PDF[i] = pdf->PDF[i]/theintegral;
//Calculate total volume of renormalized PDF
#pragma omp for reduction(+:total_vol)
for(i=0; i < pdf->total_size; i++)
total_vol += pdf->PDF[i];
}//End of parallel OpenMP Region
printf("Total integrated PDF: %g. The integral: %f\n",total_vol*total_dx,theintegral);
}
|
convolutions.c | // Copyright (c) 2014, Oren Rippel and Ryan P. Adams
// All rights reserved.
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <stdint.h>
#include <omp.h>
#include <mkl.h>
#include <math.h>
#include <offload.h>
#include <assert.h>
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#include <immintrin.h>
#include <mkl_dfti.h>
#include <complex.h>
#include <generated_macros.h>
#include <macros.h>
// The register-tiling macros below only support accumulator tiles of exactly
// one, two, or four 512-bit vectors (16 floats each).
#if ((N_BLOCK*K_BLOCK) != 16) && ((N_BLOCK*K_BLOCK) != 32) && ((N_BLOCK*K_BLOCK) != 64)
#error "N_BLOCK*K_BLOCK should be 16,32 or 64"
#endif
// LOAD_OUTPUTS / STORE_OUTPUTS: move the N_BLOCK*K_BLOCK float accumulator tile
// at `convolutions` (a pointer expected in scope at the expansion site) into or
// out of __m512 registers res_1..res_4, 16 floats per register.
#if ( (N_BLOCK * K_BLOCK) == 16)
#define LOAD_OUTPUTS \
__m512 res_1 = _mm512_extload_ps(convolutions, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#define STORE_OUTPUTS \
_mm512_extstore_ps((float *)(convolutions), res_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
#elif ( (N_BLOCK * K_BLOCK) == 32)
#define LOAD_OUTPUTS \
__m512 res_1 = _mm512_extload_ps(convolutions, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE); \
__m512 res_2 = _mm512_extload_ps(convolutions + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#define STORE_OUTPUTS \
_mm512_extstore_ps((float *)(convolutions), res_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); \
_mm512_extstore_ps((float *)(convolutions + 16), res_2, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
#elif ( (N_BLOCK * K_BLOCK) == 64)
#define LOAD_OUTPUTS \
__m512 res_1 = _mm512_extload_ps(convolutions, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE); \
__m512 res_2 = _mm512_extload_ps(convolutions + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE); \
__m512 res_3 = _mm512_extload_ps(convolutions + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE); \
__m512 res_4 = _mm512_extload_ps(convolutions + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#define STORE_OUTPUTS \
_mm512_extstore_ps((float *)(convolutions), res_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); \
_mm512_extstore_ps((float *)(convolutions + 16), res_2, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); \
_mm512_extstore_ps((float *)(convolutions + 32), res_3, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE); \
_mm512_extstore_ps((float *)(convolutions + 48), res_4, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
#endif
// COMPUTE_OUTPUTS: one fused-multiply-add step accumulating v_inputs_* x
// v_filters_* into the res_* tile. Which operand is broadcast/swizzled depends
// on whether N_BLOCK or K_BLOCK is the vector-contiguous dimension; only the
// (N_BLOCK, K_BLOCK) combinations listed here are defined -- other combinations
// would leave COMPUTE_OUTPUTS undefined and fail at its use site.
#if (N_BLOCK == 1)
#if (K_BLOCK == 16)
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_filters_0, v_inputs_0, res_1);
#elif (K_BLOCK == 32)
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_filters_0, v_inputs_0, res_1); \
res_2 = _mm512_fmadd_ps(v_filters_1, v_inputs_0, res_2);
#elif (K_BLOCK == 64)
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_filters_0, v_inputs_0, res_1); \
res_2 = _mm512_fmadd_ps(v_filters_1, v_inputs_0, res_2); \
res_3 = _mm512_fmadd_ps(v_filters_2, v_inputs_0, res_3); \
res_4 = _mm512_fmadd_ps(v_filters_3, v_inputs_0, res_4);
#endif
#elif (N_BLOCK == 4)
#if (K_BLOCK == 16)
// 4x16 tile: replicate each of the 4 input lanes across a full vector via swizzle
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_filters_0, _mm512_swizzle_ps(v_inputs_0, _MM_SWIZ_REG_AAAA), res_1); \
res_2 = _mm512_fmadd_ps(v_filters_0, _mm512_swizzle_ps(v_inputs_0, _MM_SWIZ_REG_BBBB), res_2); \
res_3 = _mm512_fmadd_ps(v_filters_0, _mm512_swizzle_ps(v_inputs_0, _MM_SWIZ_REG_CCCC), res_3); \
res_4 = _mm512_fmadd_ps(v_filters_0, _mm512_swizzle_ps(v_inputs_0, _MM_SWIZ_REG_DDDD), res_4);
#endif
#elif (N_BLOCK == 16)
#if (K_BLOCK == 1)
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_inputs_0, v_filters_0, res_1);
#elif (K_BLOCK == 4)
// 16x4 tile: mirror of the 4x16 case, swizzling the filter lanes instead
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_inputs_0, _mm512_swizzle_ps(v_filters_0, _MM_SWIZ_REG_AAAA), res_1); \
res_2 = _mm512_fmadd_ps(v_inputs_0, _mm512_swizzle_ps(v_filters_0, _MM_SWIZ_REG_BBBB), res_2); \
res_3 = _mm512_fmadd_ps(v_inputs_0, _mm512_swizzle_ps(v_filters_0, _MM_SWIZ_REG_CCCC), res_3); \
res_4 = _mm512_fmadd_ps(v_inputs_0, _mm512_swizzle_ps(v_filters_0, _MM_SWIZ_REG_DDDD), res_4);
#endif
#elif (N_BLOCK == 32)
#if (K_BLOCK == 1)
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_inputs_0, v_filters_0, res_1); \
res_2 = _mm512_fmadd_ps(v_inputs_1, v_filters_0, res_2);
#endif
#elif (N_BLOCK == 64)
#if (K_BLOCK == 1)
#define COMPUTE_OUTPUTS \
res_1 = _mm512_fmadd_ps(v_inputs_0, v_filters_0, res_1); \
res_2 = _mm512_fmadd_ps(v_inputs_1, v_filters_0, res_2); \
res_3 = _mm512_fmadd_ps(v_inputs_2, v_filters_0, res_3); \
res_4 = _mm512_fmadd_ps(v_inputs_3, v_filters_0, res_4);
#endif
#endif
// Dead experimental variant, never compiled (kept for reference only)
#if 0
res_1 = _mm512_fmadd_ps(v_inputs_0, v_filters_0, res_1); \
res_2 = _mm512_fmadd_ps(v_inputs_0, v_filters_1, res_2); \
res_3 = _mm512_fmadd_ps(v_inputs_0, v_filters_2, res_3); \
res_4 = _mm512_fmadd_ps(v_inputs_0, v_filters_3, res_4);
#endif
// PREFETCH_FILTERS: prefetch the filter data one X-row ahead
// (X_const*K_BLOCK elements past the current pointer) into L1.
// For K_BLOCK < 16 the macro intentionally expands to an empty statement.
#if (K_BLOCK == 1)
#define PREFETCH_FILTERS ;
#elif (K_BLOCK == 4)
#define PREFETCH_FILTERS ;
#elif (K_BLOCK == 16)
#define PREFETCH_FILTERS \
_mm_prefetch((char *)(filters_pointer + X_const*K_BLOCK), _MM_HINT_T0);
#elif (K_BLOCK == 32)
#define PREFETCH_FILTERS \
_mm_prefetch((char *)(filters_pointer + X_const*K_BLOCK), _MM_HINT_T0); \
_mm_prefetch((char *)(filters_pointer + X_const*K_BLOCK + 16), _MM_HINT_T0);
#elif (K_BLOCK == 64)
#define PREFETCH_FILTERS \
_mm_prefetch((char *)(filters_pointer + X_const*K_BLOCK), _MM_HINT_T0); \
_mm_prefetch((char *)(filters_pointer + X_const*K_BLOCK + 16), _MM_HINT_T0); \
_mm_prefetch((char *)(filters_pointer + X_const*K_BLOCK + 32), _MM_HINT_T0); \
_mm_prefetch((char *)(filters_pointer + X_const*K_BLOCK + 48), _MM_HINT_T0);
#endif
// PREFETCH_INPUTS: same idea for the input data, one W-row ahead
// (W_const*N_BLOCK elements past the current pointer); empty for N_BLOCK < 16.
#if (N_BLOCK == 1)
#define PREFETCH_INPUTS ;
#elif (N_BLOCK == 4)
#define PREFETCH_INPUTS ;
#elif (N_BLOCK == 16)
#define PREFETCH_INPUTS \
_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK), _MM_HINT_T0);
#elif (N_BLOCK == 32)
#define PREFETCH_INPUTS \
_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK), _MM_HINT_T0); \
_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK + 16), _MM_HINT_T0);
#elif (N_BLOCK == 64)
#define PREFETCH_INPUTS \
_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK), _MM_HINT_T0); \
_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK + 16), _MM_HINT_T0); \
_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK + 32), _MM_HINT_T0); \
_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK + 48), _MM_HINT_T0);
#endif
//LOAD_INPUTS: bring N_BLOCK input values at inputs_pointer into registers --
//a scalar broadcast (N_BLOCK==1), a 4-element group broadcast (N_BLOCK==4),
//or one to four full 16-float vector loads.
#if N_BLOCK==1
#define LOAD_INPUTS \
__m512 v_inputs_0 = _mm512_set1_ps(*inputs_pointer);
#elif N_BLOCK==4
#define LOAD_INPUTS \
__m512 v_inputs_0 = _mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST_4X16, _MM_HINT_NONE);
#elif N_BLOCK == 16
#define LOAD_INPUTS \
__m512 v_inputs_0 = _mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#elif N_BLOCK == 32
#define LOAD_INPUTS \
__m512 v_inputs_0 = _mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE); \
__m512 v_inputs_1 = _mm512_extload_ps(inputs_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#elif N_BLOCK == 64
#define LOAD_INPUTS \
__m512 v_inputs_0 = _mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);\
__m512 v_inputs_1 = _mm512_extload_ps(inputs_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);\
__m512 v_inputs_2 = _mm512_extload_ps(inputs_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);\
__m512 v_inputs_3 = _mm512_extload_ps(inputs_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#endif
//LOAD_FILTERS: same pattern for K_BLOCK filter values at filters_pointer.
#if K_BLOCK==1
#define LOAD_FILTERS \
__m512 v_filters_0 = _mm512_set1_ps(*filters_pointer);
#elif K_BLOCK==4
#define LOAD_FILTERS \
__m512 v_filters_0 = _mm512_extload_ps(filters_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST_4X16, _MM_HINT_NONE);
#elif K_BLOCK == 16
#define LOAD_FILTERS \
__m512 v_filters_0 = _mm512_extload_ps(filters_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#elif K_BLOCK == 32
#define LOAD_FILTERS \
__m512 v_filters_0 = _mm512_extload_ps(filters_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE); \
__m512 v_filters_1 = _mm512_extload_ps(filters_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#elif K_BLOCK == 64
#define LOAD_FILTERS \
__m512 v_filters_0 = _mm512_extload_ps(filters_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);\
__m512 v_filters_1 = _mm512_extload_ps(filters_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);\
__m512 v_filters_2 = _mm512_extload_ps(filters_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);\
__m512 v_filters_3 = _mm512_extload_ps(filters_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
#endif
// convolution after interleaving N and K and blocking C, after intrinsics
//
// Fused convolution + max-pooling forward pass, offloaded to a Xeon Phi (MIC)
// coprocessor when offloaded == 1. Each OpenMP iteration owns one
// (n_block, k_block) pair and accumulates an output_H*output_W*N_BLOCK*K_BLOCK
// convolution tile in its private slice of SCRATCH, then max-pools that tile
// into OUTPUTS and records the winning flat (h,w) index in ARGMAXS.
// All *_const values come from generated_macros.h and must match the runtime
// arguments (checked by the asserts). Sections like SCRATCH[a : b] are Cilk
// Plus array notation.
// NOTE(review): ti5/md/mx/mn are presumed index/modulo/max/min helper macros
// from macros.h -- confirm. The function is declared `int *` but never
// returns a value; callers presumably ignore the result -- confirm.
int *convolution_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
assert(C == C_const);
assert(H == H_const);
assert(W == W_const);
assert(K == K_const);
assert(stride == stride_const);
assert(padding == padding_const);
assert(pooling_radius == pooling_radius_const);
assert(pooling_stride == pooling_stride_const);
assert(X == X_const);
assert(Y == Y_const);
assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
assert(pooled_H_const == ceil((output_H_const - pooling_radius_const + 1.f)/pooling_stride_const));
assert(pooled_W_const == ceil((output_W_const - pooling_radius_const + 1.f)/pooling_stride_const));
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// computation of constants
int XWN = (-X_const + W_const)*N,
HYWN = (H_const-Y_const)*W_const*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
for (nk = 0; nk < N/N_BLOCK*K_const/K_BLOCK; nk++){
// Decode nk into (n_block, k_block); the interleaving order keeps the
// larger of the two block sizes innermost in the SCRATCH tile.
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private accumulator region (Cilk array section)
SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*N_BLOCK*K_BLOCK : output_H_const*output_W_const*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const; h++){
for (w = 0; w < output_W_const; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const, output_W_const, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const, output_W_const, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const >= 0 &&
h - padding_const >= 0 &&
output_W_const - 1 - w >= padding_const &&
output_H_const - 1 - h >= padding_const){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const, w - padding_const, 0, C_const, H_const, W_const, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const, Y_const, X_const, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const + Y_const, w - padding_const + X_const + 2 that may help the above problem if it exists
for (y = 0; y < Y_const; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const + W_const)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window to [min_y,max_y) x [min_x,max_x)
// and step filters_pointer past the clipped rows/columns to stay in sync
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const, H_const-1), 0), mx(mn(w-padding_const, W_const-1), 0), 0, C_const, H_const, W_const, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const, Y_const, X_const, K_BLOCK);
int min_x = mx(0, (padding_const - w));
int max_x = mn(X_const, (W_const + padding_const - w));
int min_y = mx(0, (padding_const - h));
int max_y = mn(Y_const, (H_const + padding_const - h));
filters_pointer += min_y*X_const*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const-padding_const)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const*N_BLOCK;
} //y
filters_pointer += (Y_const - max_y)*X_const*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Max-pool the accumulated SCRATCH tile into OUTPUTS; the pooling window is
// clipped at the right/bottom edges via window_width/window_height.
for (h = 0; h < pooled_H_const; h++){
for (w = 0; w < pooled_W_const; w++){
int h_output = h*pooling_stride_const;
int w_output = w*pooling_stride_const;
int window_width = pooling_radius_const - mx(w_output + pooling_radius_const - output_W_const, 0);
int window_height = pooling_radius_const - mx(h_output + pooling_radius_const - output_H_const, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const, output_W_const, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
// NOTE(review): if every window value is <= -1.0e6 the argmax entry
// is never written and keeps its previous contents -- confirm intended
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const - window_width;
outputs_pointer += (output_W_const - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
}
/*
 * convolution_layer2 -- fused convolution + max-pooling, layer-2 specialization.
 *
 * All *_const2 values are compile-time constants this translation unit was
 * generated for; the asserts below verify the runtime arguments match them.
 *
 * Blocked data layouts (last axis is the fastest-moving block):
 *   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]
 *   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]
 *   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]  -- pooled maxima
 *   ARGMAXS -- same shape as OUTPUTS; flat (h*output_W_const2 + w) index of
 *              each window maximum within the conv output plane
 *   SCRATCH -- per-thread [output_H][output_W][N_BLOCK*K_BLOCK] conv buffer
 *
 * Work is offloaded to the MIC coprocessor when offloaded == 1 and is
 * parallelized over (n_block, k_block) pairs with OpenMP.  Intel Cilk Plus
 * array notation (p[0:len] and strided p[0:len:stride]) implements the
 * vectorized pooling; PREFETCH_/LOAD_/COMPUTE_/STORE_* are project macros
 * expanding to MIC SIMD intrinsics.
 *
 * Fix: the function is declared to return int* but previously fell off the
 * end without returning (undefined behavior if the caller uses the value,
 * C11 6.9.1p12).  It now returns ARGMAXS, the int* result buffer; callers
 * that ignored the return value are unaffected.
 */
int *convolution_layer2(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// Runtime arguments must agree with the compile-time specialization.
assert(C == C_const2);
assert(H == H_const2);
assert(W == W_const2);
assert(K == K_const2);
assert(stride == stride_const2);
assert(padding == padding_const2);
assert(pooling_radius == pooling_radius_const2);
assert(pooling_stride == pooling_stride_const2);
assert(X == X_const2);
assert(Y == Y_const2);
assert(output_H_const2 == (H_const2 + 2*padding_const2 - Y_const2 + 1)/stride_const2);
assert(output_W_const2 == (W_const2 + 2*padding_const2 - X_const2 + 1)/stride_const2);
assert(pooled_H_const2 == ceil((output_H_const2 - pooling_radius_const2 + 1.f)/pooling_stride_const2));
assert(pooled_W_const2 == ceil((output_W_const2 - pooling_radius_const2 + 1.f)/pooling_stride_const2));
// All buffers were allocated on the card earlier; length(0) + REUSE means
// "reuse the existing device copy, transfer nothing".
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// loop bookkeeping constants (XWN/HYWN are unused generator boilerplate,
// kept only to satisfy the OpenMP shared() clause below)
int XWN = (-X_const2 + W_const2)*N,
HYWN = (H_const2-Y_const2)*W_const2*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
for (nk = 0; nk < N/N_BLOCK*K_const2/K_BLOCK; nk++){
// Decompose the flat nk index into (n_block, k_block); the ordering is
// chosen at compile time depending on which block dimension is larger.
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const2/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const2/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array notation).
SCRATCH[omp_get_thread_num()*output_H_const2*output_W_const2*N_BLOCK*K_BLOCK : output_H_const2*output_W_const2*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const2/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const2; h++){
for (w = 0; w < output_W_const2; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const2, output_W_const2, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const2, output_W_const2, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const2, output_W_const2, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const2, output_W_const2, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for write (ET0).
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const2 >= 0 &&
h - padding_const2 >= 0 &&
output_W_const2 - 1 - w >= padding_const2 &&
output_H_const2 - 1 - h >= padding_const2){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const2, w - padding_const2, 0, C_const2, H_const2, W_const2, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const2, Y_const2, X_const2, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const2 + Y_const2, w - padding_const2 + X_const2 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const2; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const2*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const2; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const2 + W_const2)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window against the padded borders and
// advance filters_pointer past the clipped rows/columns so it stays in
// lockstep with the (y, x) loop below.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const2, H_const2-1), 0), mx(mn(w-padding_const2, W_const2-1), 0), 0, C_const2, H_const2, W_const2, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const2, Y_const2, X_const2, K_BLOCK);
int min_x = mx(0, (padding_const2 - w));
int max_x = mn(X_const2, (W_const2 + padding_const2 - w));
int min_y = mx(0, (padding_const2 - h));
int max_y = mn(Y_const2, (H_const2 + padding_const2 - h));
filters_pointer += min_y*X_const2*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const2*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const2-padding_const2)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const2 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const2*N_BLOCK;
} //y
filters_pointer += (Y_const2 - max_y)*X_const2*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Each pooled cell takes the max over a pooling_radius x pooling_radius
// window of the conv output in SCRATCH (window clipped at the right/bottom
// edges), writing the max to OUTPUTS and its flat conv-plane index to ARGMAXS.
for (h = 0; h < pooled_H_const2; h++){
for (w = 0; w < pooled_W_const2; w++){
int h_output = h*pooling_stride_const2;
int w_output = w*pooling_stride_const2;
int window_width = pooling_radius_const2 - mx(w_output + pooling_radius_const2 - output_W_const2, 0);
int window_height = pooling_radius_const2 - mx(h_output + pooling_radius_const2 - output_H_const2, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const2, output_W_const2, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const2, output_W_const2, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const2, pooled_H_const2, pooled_W_const2, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const2, pooled_H_const2, pooled_W_const2, N_BLOCK);
// -1.0e6 acts as "minus infinity" for the running maximum.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const2 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const2 - window_width;
outputs_pointer += (output_W_const2 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS;
}
/*
 * convolution_layer3 -- fused convolution + max-pooling, layer-3 specialization.
 *
 * Identical structure to convolution_layer2 but generated against the
 * *_const3 compile-time constants; the asserts below verify the runtime
 * arguments match them.
 *
 * Blocked data layouts (last axis is the fastest-moving block):
 *   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]
 *   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]
 *   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]  -- pooled maxima
 *   ARGMAXS -- same shape as OUTPUTS; flat (h*output_W_const3 + w) index of
 *              each window maximum within the conv output plane
 *   SCRATCH -- per-thread [output_H][output_W][N_BLOCK*K_BLOCK] conv buffer
 *
 * Work is offloaded to the MIC coprocessor when offloaded == 1 and is
 * parallelized over (n_block, k_block) pairs with OpenMP.  Intel Cilk Plus
 * array notation implements the vectorized pooling; PREFETCH_/LOAD_/
 * COMPUTE_/STORE_* are project macros expanding to MIC SIMD intrinsics.
 *
 * Fix: the function is declared to return int* but previously fell off the
 * end without returning (undefined behavior if the caller uses the value,
 * C11 6.9.1p12).  It now returns ARGMAXS, the int* result buffer.
 */
int *convolution_layer3(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// Runtime arguments must agree with the compile-time specialization.
assert(C == C_const3);
assert(H == H_const3);
assert(W == W_const3);
assert(K == K_const3);
assert(stride == stride_const3);
assert(padding == padding_const3);
assert(pooling_radius == pooling_radius_const3);
assert(pooling_stride == pooling_stride_const3);
assert(X == X_const3);
assert(Y == Y_const3);
assert(output_H_const3 == (H_const3 + 2*padding_const3 - Y_const3 + 1)/stride_const3);
assert(output_W_const3 == (W_const3 + 2*padding_const3 - X_const3 + 1)/stride_const3);
assert(pooled_H_const3 == ceil((output_H_const3 - pooling_radius_const3 + 1.f)/pooling_stride_const3));
assert(pooled_W_const3 == ceil((output_W_const3 - pooling_radius_const3 + 1.f)/pooling_stride_const3));
// All buffers were allocated on the card earlier; length(0) + REUSE means
// "reuse the existing device copy, transfer nothing".
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// loop bookkeeping constants (XWN/HYWN are unused generator boilerplate,
// kept only to satisfy the OpenMP shared() clause below)
int XWN = (-X_const3 + W_const3)*N,
HYWN = (H_const3-Y_const3)*W_const3*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
for (nk = 0; nk < N/N_BLOCK*K_const3/K_BLOCK; nk++){
// Decompose the flat nk index into (n_block, k_block); the ordering is
// chosen at compile time depending on which block dimension is larger.
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const3/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const3/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array notation).
SCRATCH[omp_get_thread_num()*output_H_const3*output_W_const3*N_BLOCK*K_BLOCK : output_H_const3*output_W_const3*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const3/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const3; h++){
for (w = 0; w < output_W_const3; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const3, output_W_const3, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const3, output_W_const3, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const3, output_W_const3, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const3, output_W_const3, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for write (ET0).
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const3 >= 0 &&
h - padding_const3 >= 0 &&
output_W_const3 - 1 - w >= padding_const3 &&
output_H_const3 - 1 - h >= padding_const3){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const3, w - padding_const3, 0, C_const3, H_const3, W_const3, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const3, Y_const3, X_const3, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const3 + Y_const3, w - padding_const3 + X_const3 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const3; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const3*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const3; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const3 + W_const3)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window against the padded borders and
// advance filters_pointer past the clipped rows/columns so it stays in
// lockstep with the (y, x) loop below.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const3, H_const3-1), 0), mx(mn(w-padding_const3, W_const3-1), 0), 0, C_const3, H_const3, W_const3, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const3, Y_const3, X_const3, K_BLOCK);
int min_x = mx(0, (padding_const3 - w));
int max_x = mn(X_const3, (W_const3 + padding_const3 - w));
int min_y = mx(0, (padding_const3 - h));
int max_y = mn(Y_const3, (H_const3 + padding_const3 - h));
filters_pointer += min_y*X_const3*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const3*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const3-padding_const3)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const3 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const3*N_BLOCK;
} //y
filters_pointer += (Y_const3 - max_y)*X_const3*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Each pooled cell takes the max over a pooling_radius x pooling_radius
// window of the conv output in SCRATCH (window clipped at the right/bottom
// edges), writing the max to OUTPUTS and its flat conv-plane index to ARGMAXS.
for (h = 0; h < pooled_H_const3; h++){
for (w = 0; w < pooled_W_const3; w++){
int h_output = h*pooling_stride_const3;
int w_output = w*pooling_stride_const3;
int window_width = pooling_radius_const3 - mx(w_output + pooling_radius_const3 - output_W_const3, 0);
int window_height = pooling_radius_const3 - mx(h_output + pooling_radius_const3 - output_H_const3, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const3, output_W_const3, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const3, output_W_const3, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const3, pooled_H_const3, pooled_W_const3, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const3, pooled_H_const3, pooled_W_const3, N_BLOCK);
// -1.0e6 acts as "minus infinity" for the running maximum.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const3 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const3 - window_width;
outputs_pointer += (output_W_const3 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS;
}
/*
 * convolution_layer4 -- fused convolution + max-pooling, layer-4 specialization.
 *
 * Identical structure to convolution_layer2 but generated against the
 * *_const4 compile-time constants; the asserts below verify the runtime
 * arguments match them.
 *
 * Blocked data layouts (last axis is the fastest-moving block):
 *   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]
 *   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]
 *   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]  -- pooled maxima
 *   ARGMAXS -- same shape as OUTPUTS; flat (h*output_W_const4 + w) index of
 *              each window maximum within the conv output plane
 *   SCRATCH -- per-thread [output_H][output_W][N_BLOCK*K_BLOCK] conv buffer
 *
 * Work is offloaded to the MIC coprocessor when offloaded == 1 and is
 * parallelized over (n_block, k_block) pairs with OpenMP.  Intel Cilk Plus
 * array notation implements the vectorized pooling; PREFETCH_/LOAD_/
 * COMPUTE_/STORE_* are project macros expanding to MIC SIMD intrinsics.
 *
 * Fix: the function is declared to return int* but previously fell off the
 * end without returning (undefined behavior if the caller uses the value,
 * C11 6.9.1p12).  It now returns ARGMAXS, the int* result buffer.
 */
int *convolution_layer4(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// Runtime arguments must agree with the compile-time specialization.
assert(C == C_const4);
assert(H == H_const4);
assert(W == W_const4);
assert(K == K_const4);
assert(stride == stride_const4);
assert(padding == padding_const4);
assert(pooling_radius == pooling_radius_const4);
assert(pooling_stride == pooling_stride_const4);
assert(X == X_const4);
assert(Y == Y_const4);
assert(output_H_const4 == (H_const4 + 2*padding_const4 - Y_const4 + 1)/stride_const4);
assert(output_W_const4 == (W_const4 + 2*padding_const4 - X_const4 + 1)/stride_const4);
assert(pooled_H_const4 == ceil((output_H_const4 - pooling_radius_const4 + 1.f)/pooling_stride_const4));
assert(pooled_W_const4 == ceil((output_W_const4 - pooling_radius_const4 + 1.f)/pooling_stride_const4));
// All buffers were allocated on the card earlier; length(0) + REUSE means
// "reuse the existing device copy, transfer nothing".
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// loop bookkeeping constants (XWN/HYWN are unused generator boilerplate,
// kept only to satisfy the OpenMP shared() clause below)
int XWN = (-X_const4 + W_const4)*N,
HYWN = (H_const4-Y_const4)*W_const4*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
for (nk = 0; nk < N/N_BLOCK*K_const4/K_BLOCK; nk++){
// Decompose the flat nk index into (n_block, k_block); the ordering is
// chosen at compile time depending on which block dimension is larger.
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const4/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const4/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array notation).
SCRATCH[omp_get_thread_num()*output_H_const4*output_W_const4*N_BLOCK*K_BLOCK : output_H_const4*output_W_const4*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const4/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const4; h++){
for (w = 0; w < output_W_const4; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const4, output_W_const4, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const4, output_W_const4, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const4, output_W_const4, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const4, output_W_const4, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for write (ET0).
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const4 >= 0 &&
h - padding_const4 >= 0 &&
output_W_const4 - 1 - w >= padding_const4 &&
output_H_const4 - 1 - h >= padding_const4){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const4, w - padding_const4, 0, C_const4, H_const4, W_const4, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const4, Y_const4, X_const4, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const4 + Y_const4, w - padding_const4 + X_const4 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const4; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const4*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const4; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const4 + W_const4)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window against the padded borders and
// advance filters_pointer past the clipped rows/columns so it stays in
// lockstep with the (y, x) loop below.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const4, H_const4-1), 0), mx(mn(w-padding_const4, W_const4-1), 0), 0, C_const4, H_const4, W_const4, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const4, Y_const4, X_const4, K_BLOCK);
int min_x = mx(0, (padding_const4 - w));
int max_x = mn(X_const4, (W_const4 + padding_const4 - w));
int min_y = mx(0, (padding_const4 - h));
int max_y = mn(Y_const4, (H_const4 + padding_const4 - h));
filters_pointer += min_y*X_const4*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const4*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const4-padding_const4)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const4 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const4*N_BLOCK;
} //y
filters_pointer += (Y_const4 - max_y)*X_const4*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Each pooled cell takes the max over a pooling_radius x pooling_radius
// window of the conv output in SCRATCH (window clipped at the right/bottom
// edges), writing the max to OUTPUTS and its flat conv-plane index to ARGMAXS.
for (h = 0; h < pooled_H_const4; h++){
for (w = 0; w < pooled_W_const4; w++){
int h_output = h*pooling_stride_const4;
int w_output = w*pooling_stride_const4;
int window_width = pooling_radius_const4 - mx(w_output + pooling_radius_const4 - output_W_const4, 0);
int window_height = pooling_radius_const4 - mx(h_output + pooling_radius_const4 - output_H_const4, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const4, output_W_const4, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const4, output_W_const4, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const4, pooled_H_const4, pooled_W_const4, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const4, pooled_H_const4, pooled_W_const4, N_BLOCK);
// -1.0e6 acts as "minus infinity" for the running maximum.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const4 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const4 - window_width;
outputs_pointer += (output_W_const4 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS;
}
/*
 * convolution_layer5 -- fused convolution + max-pooling, layer-5 specialization.
 *
 * Identical structure to convolution_layer2 but generated against the
 * *_const5 compile-time constants; the asserts below verify the runtime
 * arguments match them.
 *
 * Blocked data layouts (last axis is the fastest-moving block):
 *   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]
 *   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]
 *   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]  -- pooled maxima
 *   ARGMAXS -- same shape as OUTPUTS; flat (h*output_W_const5 + w) index of
 *              each window maximum within the conv output plane
 *   SCRATCH -- per-thread [output_H][output_W][N_BLOCK*K_BLOCK] conv buffer
 *
 * Work is offloaded to the MIC coprocessor when offloaded == 1 and is
 * parallelized over (n_block, k_block) pairs with OpenMP.  Intel Cilk Plus
 * array notation implements the vectorized pooling; PREFETCH_/LOAD_/
 * COMPUTE_/STORE_* are project macros expanding to MIC SIMD intrinsics.
 *
 * Fix: the function is declared to return int* but previously fell off the
 * end without returning (undefined behavior if the caller uses the value,
 * C11 6.9.1p12).  It now returns ARGMAXS, the int* result buffer.
 */
int *convolution_layer5(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// Runtime arguments must agree with the compile-time specialization.
assert(C == C_const5);
assert(H == H_const5);
assert(W == W_const5);
assert(K == K_const5);
assert(stride == stride_const5);
assert(padding == padding_const5);
assert(pooling_radius == pooling_radius_const5);
assert(pooling_stride == pooling_stride_const5);
assert(X == X_const5);
assert(Y == Y_const5);
assert(output_H_const5 == (H_const5 + 2*padding_const5 - Y_const5 + 1)/stride_const5);
assert(output_W_const5 == (W_const5 + 2*padding_const5 - X_const5 + 1)/stride_const5);
assert(pooled_H_const5 == ceil((output_H_const5 - pooling_radius_const5 + 1.f)/pooling_stride_const5));
assert(pooled_W_const5 == ceil((output_W_const5 - pooling_radius_const5 + 1.f)/pooling_stride_const5));
// All buffers were allocated on the card earlier; length(0) + REUSE means
// "reuse the existing device copy, transfer nothing".
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// loop bookkeeping constants (XWN/HYWN are unused generator boilerplate,
// kept only to satisfy the OpenMP shared() clause below)
int XWN = (-X_const5 + W_const5)*N,
HYWN = (H_const5-Y_const5)*W_const5*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
for (nk = 0; nk < N/N_BLOCK*K_const5/K_BLOCK; nk++){
// Decompose the flat nk index into (n_block, k_block); the ordering is
// chosen at compile time depending on which block dimension is larger.
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const5/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const5/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array notation).
SCRATCH[omp_get_thread_num()*output_H_const5*output_W_const5*N_BLOCK*K_BLOCK : output_H_const5*output_W_const5*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const5/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const5; h++){
for (w = 0; w < output_W_const5; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const5, output_W_const5, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const5, output_W_const5, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const5, output_W_const5, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const5, output_W_const5, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for write (ET0).
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const5 >= 0 &&
h - padding_const5 >= 0 &&
output_W_const5 - 1 - w >= padding_const5 &&
output_H_const5 - 1 - h >= padding_const5){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const5, w - padding_const5, 0, C_const5, H_const5, W_const5, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const5, Y_const5, X_const5, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const5 + Y_const5, w - padding_const5 + X_const5 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const5; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const5*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const5; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const5 + W_const5)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window against the padded borders and
// advance filters_pointer past the clipped rows/columns so it stays in
// lockstep with the (y, x) loop below.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const5, H_const5-1), 0), mx(mn(w-padding_const5, W_const5-1), 0), 0, C_const5, H_const5, W_const5, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const5, Y_const5, X_const5, K_BLOCK);
int min_x = mx(0, (padding_const5 - w));
int max_x = mn(X_const5, (W_const5 + padding_const5 - w));
int min_y = mx(0, (padding_const5 - h));
int max_y = mn(Y_const5, (H_const5 + padding_const5 - h));
filters_pointer += min_y*X_const5*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const5*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const5-padding_const5)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const5 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const5*N_BLOCK;
} //y
filters_pointer += (Y_const5 - max_y)*X_const5*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Each pooled cell takes the max over a pooling_radius x pooling_radius
// window of the conv output in SCRATCH (window clipped at the right/bottom
// edges), writing the max to OUTPUTS and its flat conv-plane index to ARGMAXS.
for (h = 0; h < pooled_H_const5; h++){
for (w = 0; w < pooled_W_const5; w++){
int h_output = h*pooling_stride_const5;
int w_output = w*pooling_stride_const5;
int window_width = pooling_radius_const5 - mx(w_output + pooling_radius_const5 - output_W_const5, 0);
int window_height = pooling_radius_const5 - mx(h_output + pooling_radius_const5 - output_H_const5, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const5, output_W_const5, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const5, output_W_const5, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const5, pooled_H_const5, pooled_W_const5, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const5, pooled_H_const5, pooled_W_const5, N_BLOCK);
// -1.0e6 acts as "minus infinity" for the running maximum.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const5 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const5 - window_width;
outputs_pointer += (output_W_const5 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS;
}
// Fused convolution + max-pooling layer (specialization #6 of the generated
// convolution_layerN family; every *_const6 value is a compile-time constant
// baked in elsewhere in this file, and the asserts below pin the runtime
// arguments to that specialization).
//
// Blocked layouts (N_BLOCK/K_BLOCK/C_BLOCK are file-level tiling constants):
//   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]                  read
//   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]                  read
//   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]    written (pooled maxima)
//   ARGMAXS same shape as OUTPUTS                          written (flat h*output_W + w
//                                                          index of each max in conv space)
//   SCRATCH per-OpenMP-thread [output_H][output_W][N_BLOCK*K_BLOCK] accumulator
//
// Work runs on the MIC coprocessor when offloaded == 1 (buffers are REUSEd,
// i.e. assumed already resident). Returns ARGMAXS so callers can chain the
// argmax buffer; previously control fell off the end of this non-void
// function, which is undefined behavior if a caller reads the result.
int *convolution_layer6(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
assert(C == C_const6);
assert(H == H_const6);
assert(W == W_const6);
assert(K == K_const6);
assert(stride == stride_const6);
assert(padding == padding_const6);
assert(pooling_radius == pooling_radius_const6);
assert(pooling_stride == pooling_stride_const6);
assert(X == X_const6);
assert(Y == Y_const6);
assert(output_H_const6 == (H_const6 + 2*padding_const6 - Y_const6 + 1)/stride_const6);
assert(output_W_const6 == (W_const6 + 2*padding_const6 - X_const6 + 1)/stride_const6);
assert(pooled_H_const6 == ceil((output_H_const6 - pooling_radius_const6 + 1.f)/pooling_stride_const6));
assert(pooled_W_const6 == ceil((output_W_const6 - pooling_radius_const6 + 1.f)/pooling_stride_const6));
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// computation of constants
// NOTE(review): XWN/HYWN are computed (and shared below) for parity with the
// other generated layers but are not referenced in this specialization.
int XWN = (-X_const6 + W_const6)*N,
HYWN = (H_const6-Y_const6)*W_const6*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// Each nk iteration handles one (n_block, k_block) tile pair; the division/
// modulo order below is flipped so the outer stride favors the larger block.
for (nk = 0; nk < N/N_BLOCK*K_const6/K_BLOCK; nk++){
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const6/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const6/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array section).
SCRATCH[omp_get_thread_num()*output_H_const6*output_W_const6*N_BLOCK*K_BLOCK : output_H_const6*output_W_const6*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const6/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const6; h++){
for (w = 0; w < output_W_const6; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const6, output_W_const6, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const6, output_W_const6, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const6, output_W_const6, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const6, output_W_const6, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for exclusive ownership.
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const6 >= 0 &&
h - padding_const6 >= 0 &&
output_W_const6 - 1 - w >= padding_const6 &&
output_H_const6 - 1 - h >= padding_const6){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const6, w - padding_const6, 0, C_const6, H_const6, W_const6, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const6, Y_const6, X_const6, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const6 + Y_const6, w - padding_const6 + X_const6 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const6; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const6*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const6; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const6 + W_const6)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window to [min_x,max_x) x [min_y,max_y)
// so padded (out-of-image) taps are skipped entirely.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const6, H_const6-1), 0), mx(mn(w-padding_const6, W_const6-1), 0), 0, C_const6, H_const6, W_const6, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const6, Y_const6, X_const6, K_BLOCK);
int min_x = mx(0, (padding_const6 - w));
int max_x = mn(X_const6, (W_const6 + padding_const6 - w));
int min_y = mx(0, (padding_const6 - h));
int max_y = mn(Y_const6, (H_const6 + padding_const6 - h));
filters_pointer += min_y*X_const6*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const6*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const6-padding_const6)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const6 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const6*N_BLOCK;
} //y
filters_pointer += (Y_const6 - max_y)*X_const6*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Max-pool the per-thread conv accumulator into OUTPUTS/ARGMAXS; the pooling
// window is clipped against the right/bottom edges of conv space.
for (h = 0; h < pooled_H_const6; h++){
for (w = 0; w < pooled_W_const6; w++){
int h_output = h*pooling_stride_const6;
int w_output = w*pooling_stride_const6;
int window_width = pooling_radius_const6 - mx(w_output + pooling_radius_const6 - output_W_const6, 0);
int window_height = pooling_radius_const6 - mx(h_output + pooling_radius_const6 - output_H_const6, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const6, output_W_const6, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const6, output_W_const6, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const6, pooled_H_const6, pooled_W_const6, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const6, pooled_H_const6, pooled_W_const6, N_BLOCK);
// NOTE(review): if every conv value in the window were <= -1.0e6, ARGMAXS
// would keep its previous contents — presumed impossible for this workload.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const6 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const6 - window_width;
outputs_pointer += (output_W_const6 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS; // hand back the argmax buffer (fixes fall-off-the-end UB)
}
// Fused convolution + max-pooling layer (specialization #7 of the generated
// convolution_layerN family; every *_const7 value is a compile-time constant
// baked in elsewhere in this file, and the asserts below pin the runtime
// arguments to that specialization).
//
// Blocked layouts (N_BLOCK/K_BLOCK/C_BLOCK are file-level tiling constants):
//   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]                  read
//   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]                  read
//   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]    written (pooled maxima)
//   ARGMAXS same shape as OUTPUTS                          written (flat h*output_W + w
//                                                          index of each max in conv space)
//   SCRATCH per-OpenMP-thread [output_H][output_W][N_BLOCK*K_BLOCK] accumulator
//
// Work runs on the MIC coprocessor when offloaded == 1 (buffers are REUSEd,
// i.e. assumed already resident). Returns ARGMAXS so callers can chain the
// argmax buffer; previously control fell off the end of this non-void
// function, which is undefined behavior if a caller reads the result.
int *convolution_layer7(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
assert(C == C_const7);
assert(H == H_const7);
assert(W == W_const7);
assert(K == K_const7);
assert(stride == stride_const7);
assert(padding == padding_const7);
assert(pooling_radius == pooling_radius_const7);
assert(pooling_stride == pooling_stride_const7);
assert(X == X_const7);
assert(Y == Y_const7);
assert(output_H_const7 == (H_const7 + 2*padding_const7 - Y_const7 + 1)/stride_const7);
assert(output_W_const7 == (W_const7 + 2*padding_const7 - X_const7 + 1)/stride_const7);
assert(pooled_H_const7 == ceil((output_H_const7 - pooling_radius_const7 + 1.f)/pooling_stride_const7));
assert(pooled_W_const7 == ceil((output_W_const7 - pooling_radius_const7 + 1.f)/pooling_stride_const7));
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// computation of constants
// NOTE(review): XWN/HYWN are computed (and shared below) for parity with the
// other generated layers but are not referenced in this specialization.
int XWN = (-X_const7 + W_const7)*N,
HYWN = (H_const7-Y_const7)*W_const7*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// Each nk iteration handles one (n_block, k_block) tile pair; the division/
// modulo order below is flipped so the outer stride favors the larger block.
for (nk = 0; nk < N/N_BLOCK*K_const7/K_BLOCK; nk++){
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const7/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const7/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array section).
SCRATCH[omp_get_thread_num()*output_H_const7*output_W_const7*N_BLOCK*K_BLOCK : output_H_const7*output_W_const7*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const7/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const7; h++){
for (w = 0; w < output_W_const7; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const7, output_W_const7, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const7, output_W_const7, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const7, output_W_const7, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const7, output_W_const7, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for exclusive ownership.
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const7 >= 0 &&
h - padding_const7 >= 0 &&
output_W_const7 - 1 - w >= padding_const7 &&
output_H_const7 - 1 - h >= padding_const7){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const7, w - padding_const7, 0, C_const7, H_const7, W_const7, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const7, Y_const7, X_const7, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const7 + Y_const7, w - padding_const7 + X_const7 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const7; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const7*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const7; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const7 + W_const7)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window to [min_x,max_x) x [min_y,max_y)
// so padded (out-of-image) taps are skipped entirely.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const7, H_const7-1), 0), mx(mn(w-padding_const7, W_const7-1), 0), 0, C_const7, H_const7, W_const7, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const7, Y_const7, X_const7, K_BLOCK);
int min_x = mx(0, (padding_const7 - w));
int max_x = mn(X_const7, (W_const7 + padding_const7 - w));
int min_y = mx(0, (padding_const7 - h));
int max_y = mn(Y_const7, (H_const7 + padding_const7 - h));
filters_pointer += min_y*X_const7*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const7*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const7-padding_const7)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const7 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const7*N_BLOCK;
} //y
filters_pointer += (Y_const7 - max_y)*X_const7*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Max-pool the per-thread conv accumulator into OUTPUTS/ARGMAXS; the pooling
// window is clipped against the right/bottom edges of conv space.
for (h = 0; h < pooled_H_const7; h++){
for (w = 0; w < pooled_W_const7; w++){
int h_output = h*pooling_stride_const7;
int w_output = w*pooling_stride_const7;
int window_width = pooling_radius_const7 - mx(w_output + pooling_radius_const7 - output_W_const7, 0);
int window_height = pooling_radius_const7 - mx(h_output + pooling_radius_const7 - output_H_const7, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const7, output_W_const7, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const7, output_W_const7, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const7, pooled_H_const7, pooled_W_const7, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const7, pooled_H_const7, pooled_W_const7, N_BLOCK);
// NOTE(review): if every conv value in the window were <= -1.0e6, ARGMAXS
// would keep its previous contents — presumed impossible for this workload.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const7 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const7 - window_width;
outputs_pointer += (output_W_const7 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS; // hand back the argmax buffer (fixes fall-off-the-end UB)
}
// Fused convolution + max-pooling layer (specialization #8 of the generated
// convolution_layerN family; every *_const8 value is a compile-time constant
// baked in elsewhere in this file, and the asserts below pin the runtime
// arguments to that specialization).
//
// Blocked layouts (N_BLOCK/K_BLOCK/C_BLOCK are file-level tiling constants):
//   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]                  read
//   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]                  read
//   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]    written (pooled maxima)
//   ARGMAXS same shape as OUTPUTS                          written (flat h*output_W + w
//                                                          index of each max in conv space)
//   SCRATCH per-OpenMP-thread [output_H][output_W][N_BLOCK*K_BLOCK] accumulator
//
// Work runs on the MIC coprocessor when offloaded == 1 (buffers are REUSEd,
// i.e. assumed already resident). Returns ARGMAXS so callers can chain the
// argmax buffer; previously control fell off the end of this non-void
// function, which is undefined behavior if a caller reads the result.
int *convolution_layer8(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
assert(C == C_const8);
assert(H == H_const8);
assert(W == W_const8);
assert(K == K_const8);
assert(stride == stride_const8);
assert(padding == padding_const8);
assert(pooling_radius == pooling_radius_const8);
assert(pooling_stride == pooling_stride_const8);
assert(X == X_const8);
assert(Y == Y_const8);
assert(output_H_const8 == (H_const8 + 2*padding_const8 - Y_const8 + 1)/stride_const8);
assert(output_W_const8 == (W_const8 + 2*padding_const8 - X_const8 + 1)/stride_const8);
assert(pooled_H_const8 == ceil((output_H_const8 - pooling_radius_const8 + 1.f)/pooling_stride_const8));
assert(pooled_W_const8 == ceil((output_W_const8 - pooling_radius_const8 + 1.f)/pooling_stride_const8));
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// computation of constants
// NOTE(review): XWN/HYWN are computed (and shared below) for parity with the
// other generated layers but are not referenced in this specialization.
int XWN = (-X_const8 + W_const8)*N,
HYWN = (H_const8-Y_const8)*W_const8*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// Each nk iteration handles one (n_block, k_block) tile pair; the division/
// modulo order below is flipped so the outer stride favors the larger block.
for (nk = 0; nk < N/N_BLOCK*K_const8/K_BLOCK; nk++){
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const8/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const8/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array section).
SCRATCH[omp_get_thread_num()*output_H_const8*output_W_const8*N_BLOCK*K_BLOCK : output_H_const8*output_W_const8*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const8/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const8; h++){
for (w = 0; w < output_W_const8; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const8, output_W_const8, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const8, output_W_const8, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const8, output_W_const8, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const8, output_W_const8, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for exclusive ownership.
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const8 >= 0 &&
h - padding_const8 >= 0 &&
output_W_const8 - 1 - w >= padding_const8 &&
output_H_const8 - 1 - h >= padding_const8){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const8, w - padding_const8, 0, C_const8, H_const8, W_const8, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const8, Y_const8, X_const8, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const8 + Y_const8, w - padding_const8 + X_const8 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const8; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const8*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const8; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const8 + W_const8)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window to [min_x,max_x) x [min_y,max_y)
// so padded (out-of-image) taps are skipped entirely.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const8, H_const8-1), 0), mx(mn(w-padding_const8, W_const8-1), 0), 0, C_const8, H_const8, W_const8, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const8, Y_const8, X_const8, K_BLOCK);
int min_x = mx(0, (padding_const8 - w));
int max_x = mn(X_const8, (W_const8 + padding_const8 - w));
int min_y = mx(0, (padding_const8 - h));
int max_y = mn(Y_const8, (H_const8 + padding_const8 - h));
filters_pointer += min_y*X_const8*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const8*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const8-padding_const8)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const8 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const8*N_BLOCK;
} //y
filters_pointer += (Y_const8 - max_y)*X_const8*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Max-pool the per-thread conv accumulator into OUTPUTS/ARGMAXS; the pooling
// window is clipped against the right/bottom edges of conv space.
for (h = 0; h < pooled_H_const8; h++){
for (w = 0; w < pooled_W_const8; w++){
int h_output = h*pooling_stride_const8;
int w_output = w*pooling_stride_const8;
int window_width = pooling_radius_const8 - mx(w_output + pooling_radius_const8 - output_W_const8, 0);
int window_height = pooling_radius_const8 - mx(h_output + pooling_radius_const8 - output_H_const8, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const8, output_W_const8, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const8, output_W_const8, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const8, pooled_H_const8, pooled_W_const8, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const8, pooled_H_const8, pooled_W_const8, N_BLOCK);
// NOTE(review): if every conv value in the window were <= -1.0e6, ARGMAXS
// would keep its previous contents — presumed impossible for this workload.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const8 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const8 - window_width;
outputs_pointer += (output_W_const8 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS; // hand back the argmax buffer (fixes fall-off-the-end UB)
}
// Fused convolution + max-pooling layer (specialization #9 of the generated
// convolution_layerN family; every *_const9 value is a compile-time constant
// baked in elsewhere in this file, and the asserts below pin the runtime
// arguments to that specialization).
//
// Blocked layouts (N_BLOCK/K_BLOCK/C_BLOCK are file-level tiling constants):
//   INPUTS  [N/N_BLOCK][C][H][W][N_BLOCK]                  read
//   FILTERS [K/K_BLOCK][C][Y][X][K_BLOCK]                  read
//   OUTPUTS [N/N_BLOCK][K][pooled_H][pooled_W][N_BLOCK]    written (pooled maxima)
//   ARGMAXS same shape as OUTPUTS                          written (flat h*output_W + w
//                                                          index of each max in conv space)
//   SCRATCH per-OpenMP-thread [output_H][output_W][N_BLOCK*K_BLOCK] accumulator
//
// Work runs on the MIC coprocessor when offloaded == 1 (buffers are REUSEd,
// i.e. assumed already resident). Returns ARGMAXS so callers can chain the
// argmax buffer; previously control fell off the end of this non-void
// function, which is undefined behavior if a caller reads the result.
int *convolution_layer9(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
assert(C == C_const9);
assert(H == H_const9);
assert(W == W_const9);
assert(K == K_const9);
assert(stride == stride_const9);
assert(padding == padding_const9);
assert(pooling_radius == pooling_radius_const9);
assert(pooling_stride == pooling_stride_const9);
assert(X == X_const9);
assert(Y == Y_const9);
assert(output_H_const9 == (H_const9 + 2*padding_const9 - Y_const9 + 1)/stride_const9);
assert(output_W_const9 == (W_const9 + 2*padding_const9 - X_const9 + 1)/stride_const9);
assert(pooled_H_const9 == ceil((output_H_const9 - pooling_radius_const9 + 1.f)/pooling_stride_const9));
assert(pooled_W_const9 == ceil((output_W_const9 - pooling_radius_const9 + 1.f)/pooling_stride_const9));
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// computation of constants
// NOTE(review): XWN/HYWN are computed (and shared below) for parity with the
// other generated layers but are not referenced in this specialization.
int XWN = (-X_const9 + W_const9)*N,
HYWN = (H_const9-Y_const9)*W_const9*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// Each nk iteration handles one (n_block, k_block) tile pair; the division/
// modulo order below is flipped so the outer stride favors the larger block.
for (nk = 0; nk < N/N_BLOCK*K_const9/K_BLOCK; nk++){
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const9/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const9/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution accumulator (Cilk array section).
SCRATCH[omp_get_thread_num()*output_H_const9*output_W_const9*N_BLOCK*K_BLOCK : output_H_const9*output_W_const9*N_BLOCK*K_BLOCK] = 0.f;
for (c_block = 0; c_block < C_const9/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const9; h++){
for (w = 0; w < output_W_const9; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const9, output_W_const9, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const9, output_W_const9, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const9, output_W_const9, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const9, output_W_const9, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Prefetch the next (w+1) accumulator tile into L1 for exclusive ownership.
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e not affected by padding)
if (w - padding_const9 >= 0 &&
h - padding_const9 >= 0 &&
output_W_const9 - 1 - w >= padding_const9 &&
output_H_const9 - 1 - h >= padding_const9){
#if 1 && defined __MIC__
// The following loads the a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const9, w - padding_const9, 0, C_const9, H_const9, W_const9, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const9, Y_const9, X_const9, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const9 + Y_const9, w - padding_const9 + X_const9 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const9; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const9*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const9; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
inputs_pointer += (-X_const9 + W_const9)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// Boundary case: clip the filter window to [min_x,max_x) x [min_y,max_y)
// so padded (out-of-image) taps are skipped entirely.
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const9, H_const9-1), 0), mx(mn(w-padding_const9, W_const9-1), 0), 0, C_const9, H_const9, W_const9, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const9, Y_const9, X_const9, K_BLOCK);
int min_x = mx(0, (padding_const9 - w));
int max_x = mn(X_const9, (W_const9 + padding_const9 - w));
int min_y = mx(0, (padding_const9 - h));
int max_y = mn(Y_const9, (H_const9 + padding_const9 - h));
filters_pointer += min_y*X_const9*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const9*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const9-padding_const9)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
filters_pointer += (X_const9 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const9*N_BLOCK;
} //y
filters_pointer += (Y_const9 - max_y)*X_const9*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Max-pool the per-thread conv accumulator into OUTPUTS/ARGMAXS; the pooling
// window is clipped against the right/bottom edges of conv space.
for (h = 0; h < pooled_H_const9; h++){
for (w = 0; w < pooled_W_const9; w++){
int h_output = h*pooling_stride_const9;
int w_output = w*pooling_stride_const9;
int window_width = pooling_radius_const9 - mx(w_output + pooling_radius_const9 - output_W_const9, 0);
int window_height = pooling_radius_const9 - mx(h_output + pooling_radius_const9 - output_H_const9, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const9, output_W_const9, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const9, output_W_const9, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const9, pooled_H_const9, pooled_W_const9, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const9, pooled_H_const9, pooled_W_const9, N_BLOCK);
// NOTE(review): if every conv value in the window were <= -1.0e6, ARGMAXS
// would keep its previous contents — presumed impossible for this workload.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const9 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
outputs_index += output_W_const9 - window_width;
outputs_pointer += (output_W_const9 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
return ARGMAXS; // hand back the argmax buffer (fixes fall-off-the-end UB)
}
// convolution_layer10: fused convolution + max-pooling forward pass for one
// layer, offloaded to the Xeon Phi (MIC) coprocessor via #pragma offload.
//
// Layouts implied by the ti5(...) index expressions below:
//   INPUTS  [N/N_BLOCK, C, H, W, N_BLOCK]
//   FILTERS [K/K_BLOCK, C, Y, X, K_BLOCK]
//   OUTPUTS [N/N_BLOCK, K, pooled_H, pooled_W, N_BLOCK]  -- pooled maxima
//   ARGMAXS [N/N_BLOCK, K, pooled_H, pooled_W, N_BLOCK]  -- linear pre-pool
//           index (h_output*output_W + w_output) of each maximum
//   SCRATCH -- per-thread staging tile of output_H*output_W*N_BLOCK*K_BLOCK
//              floats, indexed by omp_get_thread_num().
//
// All geometry arguments must equal the layer-10 compile-time constants
// (enforced by the asserts below); the runtime parameters only let callers
// pass the same values dynamically.
//
// NOTE(review): declared to return int* but the function body contains no
// return statement (undefined behavior if the caller uses the result) --
// presumably callers ignore the return value; confirm before relying on it.
int *convolution_layer10(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// Guard: runtime arguments must match the compile-time specialization.
assert(C == C_const10);
assert(H == H_const10);
assert(W == W_const10);
assert(K == K_const10);
assert(stride == stride_const10);
assert(padding == padding_const10);
assert(pooling_radius == pooling_radius_const10);
assert(pooling_stride == pooling_stride_const10);
assert(X == X_const10);
assert(Y == Y_const10);
assert(output_H_const10 == (H_const10 + 2*padding_const10 - Y_const10 + 1)/stride_const10);
assert(output_W_const10 == (W_const10 + 2*padding_const10 - X_const10 + 1)/stride_const10);
assert(pooled_H_const10 == ceil((output_H_const10 - pooling_radius_const10 + 1.f)/pooling_stride_const10));
assert(pooled_W_const10 == ceil((output_W_const10 - pooling_radius_const10 + 1.f)/pooling_stride_const10));
// Offload to the MIC device when offloaded == 1. length(0) + REUSE means the
// buffers were allocated/transferred by an earlier offload; no copy here.
#pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
int nk, hw, ij, nkhw;
// computation of constants
// NOTE(review): XWN and HYWN are computed and listed in shared(...) but are
// not referenced in this loop body -- likely leftovers from another variant.
int XWN = (-X_const10 + W_const10)*N,
HYWN = (H_const10-Y_const10)*W_const10*N;
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// One parallel iteration per (n_block, k_block) pair; decode order below
// depends on which of K_BLOCK / N_BLOCK is the innermost SCRATCH dimension.
for (nk = 0; nk < N/N_BLOCK*K_const10/K_BLOCK; nk++){
#if K_BLOCK > N_BLOCK
k_block = nk / (N/N_BLOCK);
k = k_block*K_BLOCK;
n_block = md(nk, N/N_BLOCK);
n = n_block*N_BLOCK;
#else
n_block = nk / (K_const10/K_BLOCK);
n = n_block*N_BLOCK;
k_block = md(nk, K_const10/K_BLOCK);
k = k_block*K_BLOCK;
#endif
// Zero this thread's private convolution tile (Cilk array-section assign).
SCRATCH[omp_get_thread_num()*output_H_const10*output_W_const10*N_BLOCK*K_BLOCK : output_H_const10*output_W_const10*N_BLOCK*K_BLOCK] = 0.f;
// Accumulate over input channels in blocks of C_BLOCK.
for (c_block = 0; c_block < C_const10/C_BLOCK; c_block++){
c = c_block*C_BLOCK;
for (h = 0; h < output_H_const10; h++){
for (w = 0; w < output_W_const10; w++){
#if K_BLOCK > N_BLOCK
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const10, output_W_const10, N_BLOCK, K_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const10, output_W_const10, N_BLOCK, K_BLOCK);
#else
float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const10, output_W_const10, K_BLOCK, N_BLOCK);
float *restrict convolutions_next = SCRATCH + ti5(omp_get_thread_num(), h, w+1, 0, 0, output_H_const10, output_W_const10, K_BLOCK, N_BLOCK);
#endif
__assume_aligned(convolutions, 64);
// Software-prefetch the next (w+1) scratch tile while computing this one.
#pragma unroll
for(int i = 0; i < (N_BLOCK*K_BLOCK); i+= 16)
{
_mm_prefetch((char *)(convolutions_next + i), _MM_HINT_ET0);
}
// if we're not on boundary (i.e. not affected by padding)
if (w - padding_const10 >= 0 &&
h - padding_const10 >= 0 &&
output_W_const10 - 1 - w >= padding_const10 &&
output_H_const10 - 1 - h >= padding_const10){
// --- interior path: full Y x X window, no clamping needed ---
#if 1 && defined __MIC__
// The following loads a N_BLOCK*K_BLOCK region of SCRATCH space into SIMD registers
LOAD_OUTPUTS;
#endif
#pragma unroll (C_BLOCK)
for (int cc = 0; cc < C_BLOCK; cc++){
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, h - padding_const10, w - padding_const10, 0, C_const10, H_const10, W_const10, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const10, Y_const10, X_const10, K_BLOCK);
// The idea is to ensure that the working space of INPUTS = [N_BLOCK * C_BLOCK * W * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// The idea is to ensure that the working space of FILTERS = [K_BLOCK * C_BLOCK * X * Y] is small enough to fit in L2 so that we can reuse across [H,W] loops; if not we have a problem; CHECK if performance is low.
// TODO: introduce L2 prefetch for INPUTS + ti5(n_block, c, h - padding_const10 + Y_const10, w - padding_const10 + X_const10 + 2 that may help the above problem if it exists
for (y = 0; y < Y_const10; ++y){
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const10*N_BLOCK), _MM_HINT_T0);
for (x = 0; x < X_const10; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
// Step to the next input row: undo X advance, add one W stride.
inputs_pointer += (-X_const10 + W_const10)*N_BLOCK;
} // y
} // cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
}
else{
// --- boundary path: clamp the filter window to the valid input region ---
#if 1 && defined __MIC__
LOAD_OUTPUTS;
#endif
for (int cc = 0; cc < C_BLOCK; cc++){
// Start at the clamped top-left input element of the window.
float *restrict inputs_pointer = INPUTS + ti5(n_block, c+cc, mx(mn(h-padding_const10, H_const10-1), 0), mx(mn(w-padding_const10, W_const10-1), 0), 0, C_const10, H_const10, W_const10, N_BLOCK);
float *restrict filters_pointer = FILTERS + ti5(k_block, c+cc, 0, 0, 0, C_const10, Y_const10, X_const10, K_BLOCK);
// Valid (in-bounds) sub-range of the Y x X filter window at (h, w).
int min_x = mx(0, (padding_const10 - w));
int max_x = mn(X_const10, (W_const10 + padding_const10 - w));
int min_y = mx(0, (padding_const10 - h));
int max_y = mn(Y_const10, (H_const10 + padding_const10 - h));
// Skip the filter rows that fall entirely in the padding.
filters_pointer += min_y*X_const10*K_BLOCK;
//TODO: I am fairly sure more prefetches are required for FILTERS here...
for (y = min_y; y < max_y; ++y){
float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// Skip the leading filter columns that fall in the padding.
filters_pointer += min_x*K_BLOCK;
// This prefetch is only for INPUTS order [N, C, H, W]
//_mm_prefetch((char *)(inputs_pointer + W_const10*N_BLOCK), _MM_HINT_T0);
#pragma unroll (X_const10-padding_const10)
#pragma noprefetch
for (x = min_x; x < max_x; ++x)
{
#if 1 && defined __MIC__
// This assumes the filters are already in L2...
PREFETCH_INPUTS;
PREFETCH_FILTERS;
LOAD_INPUTS;
LOAD_FILTERS;
COMPUTE_OUTPUTS;
#endif
filters_pointer += K_BLOCK;
inputs_pointer += N_BLOCK;
} // x
// Skip trailing padded filter columns; rewind input to next row start.
filters_pointer += (X_const10 - max_x)*K_BLOCK;
inputs_pointer = inputs_pointer_y + W_const10*N_BLOCK;
} //y
// Skip trailing padded filter rows so the pointer ends past the window.
filters_pointer += (Y_const10 - max_y)*X_const10*K_BLOCK;
} //cc
#if 1 && defined __MIC__
STORE_OUTPUTS;
#endif
} // if-else
} // w
} // h
} // c_block
// ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// Max-pool the thread-private SCRATCH tile into OUTPUTS, recording the
// linear pre-pool index of each maximum in ARGMAXS.
for (h = 0; h < pooled_H_const10; h++){
for (w = 0; w < pooled_W_const10; w++){
int h_output = h*pooling_stride_const10;
int w_output = w*pooling_stride_const10;
// Shrink the window where it would run past the convolution output edge.
int window_width = pooling_radius_const10 - mx(w_output + pooling_radius_const10 - output_W_const10, 0);
int window_height = pooling_radius_const10 - mx(h_output + pooling_radius_const10 - output_H_const10, 0);
for (int kk = 0; kk < K_BLOCK; kk++){
#if K_BLOCK > N_BLOCK
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, 0, kk, output_H_const10, output_W_const10, N_BLOCK, K_BLOCK);
#else
float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const10, output_W_const10, K_BLOCK, N_BLOCK);
#endif
int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const10, pooled_H_const10, pooled_W_const10, N_BLOCK);
float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const10, pooled_H_const10, pooled_W_const10, N_BLOCK);
// Sentinel "minus infinity"; assumes no convolution value is <= -1.0e6.
pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
int outputs_index = h_output*output_W_const10 + w_output;
for (y = 0; y < window_height; y++){
for (x = 0; x < window_width; x++){
#if K_BLOCK > N_BLOCK
// Strided section: gather the N_BLOCK lanes of feature kk (stride K_BLOCK).
if (outputs_pointer[0 : N_BLOCK : K_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK : K_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#else
if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
argmaxs_pointer[0 : N_BLOCK] = outputs_index;
}
#endif
outputs_index++;
outputs_pointer += K_BLOCK*N_BLOCK;
}
// Advance both index and pointer to the start of the next window row.
outputs_index += output_W_const10 - window_width;
outputs_pointer += (output_W_const10 - window_width)*K_BLOCK*N_BLOCK;
}
}
}
}
} //nk
} // pragma_offload
}
// gradient for intense blocking/data structures
// INPUTS data structure [N, C/C_BLOCK, H, W, C_BLOCK]
// D_FILTERS/FILTERS data structure [C/C_BLOCK, Y/Y_BLOCK, K, Y_BLOCK, X, C_BLOCK]
// ARGMAXS/OUTPUTS/D_POOLED_OUTPUTS data structure [N, pooled_H, pooled_W, K]
// void convolution_gradient_layer1(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(padding == padding_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == ceil((output_H_const - pooling_radius_const + 1.f)/pooling_stride_const));
// assert(pooled_W_const == ceil((output_W_const - pooling_radius_const + 1.f)/pooling_stride_const));
// #pragma offload target(mic:MIC_DEV) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(D_POOLED_OUTPUTS:length(0) REUSE) \
// in(D_FILTERS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int C_blocks = C_const/C_BLOCK_GRAD;
// int Y_blocks = Y_const/Y_BLOCK_GRAD;
// int N_blocks = N/N_BLOCK_GRAD;
// int H_blocks = output_H_const/H_ARG_BLOCK_GRAD;
// int W_blocks = output_W_const/W_ARG_BLOCK_GRAD;
// omp_lock_t writelock[C_BLOCK_GRAD*Y_BLOCK_GRAD*16];
// for(int i = 0; i < C_BLOCK_GRAD*Y_BLOCK_GRAD; i++)
// omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
// #pragma omp parallel for \
// default(none) \
// schedule(dynamic) \
// shared(N, INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
// for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
// int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
// int n = n_block*N_BLOCK_GRAD;
// int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
// int h = h_block * H_ARG_BLOCK_GRAD;
// int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
// int w = w_block * W_ARG_BLOCK_GRAD;
// int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
// int c = c_block * C_BLOCK_GRAD;
// int y_block = md(outer, Y_blocks);
// int y = y_block * Y_BLOCK_GRAD;
// int size_local_scratch = Y_BLOCK_GRAD * X_const * C_BLOCK_GRAD;
// float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
// C_blocks, Y_blocks, K_const, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD);
// local_scratch[ 0 : K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD] = 0.f;
// // for each element in the pre-pooled outputs, find out whether it is an argmax
// for (int n_tot = n; n_tot < n + N_BLOCK_GRAD; n_tot++){
// for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD, output_H_const); h_arg++){
// if ((y + h_arg - padding_const < 0) || (y + h_arg - padding_const >= H_const)) continue;
// int h_start = mx((h_arg + pooling_stride_const - pooling_radius_const)/pooling_stride_const, 0);
// int h_end = mn(h_arg/pooling_stride_const, pooled_H_const - 1);
// int h_inputs = h_arg + y - padding_const;
// for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD, output_W_const); w_arg++){
// int linear_index = h_arg*output_W_const + w_arg;
// // figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
// int x_invalid_left = mx(padding_const - w_arg, 0);
// int x_invalid_right = mx(w_arg - padding_const + X_const - W_const, 0);
// int x_len = mx(X_const - x_invalid_left - x_invalid_right, 0);
// int x_len_aligned = x_len / 2 * 2;
// int w_start = mx((w_arg + pooling_stride_const - pooling_radius_const)/pooling_stride_const, 0);
// int w_end = mn(w_arg/pooling_stride_const, pooled_W_const - 1);
// int w_inputs = mx(mn(w_arg - padding_const, W_const - 1), 0);
// float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const, W_const, C_BLOCK_GRAD);
// int full_x_line = (x_len == X_const);
// __declspec(aligned(64)) float pooled_outputs_coefficients[K_const];
// __declspec(aligned(64)) int ks[K_const];
// for(int i = 0; i < X_const; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
// if(full_x_line)
// {
// // scan over all windows in which this element appears
// for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
// for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
// int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const, pooled_W_const, K_const);
// int cnt = K_const;
// #if defined __MIC__
// __m512i v_linear_index = _mm512_set1_epi32(linear_index);
// cnt = 0;
// __m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
// for (int k = 0; k < K_const; k+=16){
// __m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
// __m512i v_ARGMAXS = _mm512_undefined_epi32();
// __m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
// v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
// v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
// __mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
// v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
// v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
// _mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
// _mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
// _mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
// _mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
// cnt += _mm_countbits_32(m);
// }
// #endif
// for (int k2 = 0; k2 < cnt; k2++){
// // if this (h_arg, w_arg) is the argmax of the window
// int k = ks[k2];
// float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
// float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
// float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
// float * restrict INPUTS_pointer = INPUTS_pointer_base;
// #if (C_BLOCK_GRAD == 16) && (defined __MIC__)
// __m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
// #pragma unroll (X_const)
// for(int i = 0; i < X_const; i++)
// {
// _mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
// __m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
// _mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// local_scratch_pointer += 16;
// INPUTS_pointer += 16;
// }
// #else
// local_scratch_pointer[0 : x_len*C_BLOCK_GRAD] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD];
// #endif
// } // k
// } // w_pooled
// } // h_pooled
// }
// else
// {
// // scan over all windows in which this element appears
// for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
// for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
// int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const, pooled_W_const, K_const);
// int cnt = K_const;
// #if defined __MIC__
// __m512i v_linear_index = _mm512_set1_epi32(linear_index);
// cnt = 0;
// __m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
// for (int k = 0; k < K_const; k+=16){
// __m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
// __m512i v_ARGMAXS = _mm512_undefined_epi32();
// __m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
// v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
// v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
// v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
// v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
// __mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
// _mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
// _mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
// _mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
// _mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
// cnt += _mm_countbits_32(m);
// }
// #endif
// for (int k2 = 0; k2 < cnt; k2++){
// // if this (h_arg, w_arg) is the argmax of the window
// int k = ks[k2];
// float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
// float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD);
// float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD);
// float * restrict INPUTS_pointer = INPUTS_pointer_base;
// #if (C_BLOCK_GRAD == 16) && defined __MIC__
// __m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
// for(int x = 0; x < x_len_aligned; x+=2)
// {
// _mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
// _mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
// __m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
// v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
// _mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// }
// for(int x = x_len_aligned; x < x_len; x++)
// {
// _mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
// __m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
// _mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// }
// #else
// local_scratch_pointer[0 : x_len*C_BLOCK_GRAD] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD];
// #endif
// } // k
// } // w_pooled
// } // h_pooled
// }
// } // w_arg
// } // h_arg
// } // nn
// omp_set_lock(&writelock[16*c_block*y_block]);
// {
// D_FILTERS[c_block * y_block * K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD : K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD] += local_scratch[ 0 : K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD];
// }
// omp_unset_lock(&writelock[16*c_block*y_block]);
// } // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
// #if 0
// for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
// #pragma omp parallel for schedule(dynamic)
// for(int inner = 0; inner < C_blocks*Y_blocks*K_const*Y_BLOCK_GRAD; inner++)
// {
// float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD + inner*X_const*C_BLOCK_GRAD;
// float *d_filters_pointer = D_FILTERS + inner*X_const*C_BLOCK_GRAD;
// d_filters_pointer[0: X_const*C_BLOCK_GRAD] += local_scratch_pointer[0 : X_const*C_BLOCK_GRAD];
// }
// }
// #endif
// } // pragma offload
// }
// INPUTS/D_INPUTS data structure [N, C/C_BLOCK, H, W, C_BLOCK]
// D_FILTERS/FILTERS data structure [C/C_BLOCK, Y/Y_BLOCK, K, Y_BLOCK, X, C_BLOCK]
// ARGMAXS/OUTPUTS/D_POOLED_OUTPUTS data structure [N, pooled_H, pooled_W, K]
void convolution_gradient_layer2(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const2);
assert(H == H_const2);
assert(W == W_const2);
assert(K == K_const2);
assert(padding == padding_const2);
assert(X == X_const2);
assert(Y == Y_const2);
assert(output_H_const2 == (H_const2 + 2*padding_const2 - Y_const2 + 1)/stride_const2);
assert(output_W_const2 == (W_const2 + 2*padding_const2 - X_const2 + 1)/stride_const2);
assert(pooled_H_const2 == ceil((output_H_const2 - pooling_radius_const2 + 1.f)/pooling_stride_const2));
assert(pooled_W_const2 == ceil((output_W_const2 - pooling_radius_const2 + 1.f)/pooling_stride_const2));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const2/C_BLOCK_GRAD2;
int Y_blocks = Y_const2/Y_BLOCK_GRAD2;
int N_blocks = N/N_BLOCK_GRAD2;
int H_blocks = output_H_const2/H_ARG_BLOCK_GRAD2;
int W_blocks = output_W_const2/W_ARG_BLOCK_GRAD2;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD2;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD2;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD2;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD2;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD2;
int size_local_scratch = Y_BLOCK_GRAD2 * X_const2 * C_BLOCK_GRAD2;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const2, Y_BLOCK_GRAD2, X_const2, C_BLOCK_GRAD2);
local_scratch[ 0 : K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD2; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD2, output_H_const2); h_arg++){
if ((y + h_arg - padding_const2 < 0) || (y + h_arg - padding_const2 >= H_const2)) continue;
int h_start = mx((h_arg + pooling_stride_const2 - pooling_radius_const2)/pooling_stride_const2, 0);
int h_end = mn(h_arg/pooling_stride_const2, pooled_H_const2 - 1);
int h_inputs = h_arg + y - padding_const2;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD2, output_W_const2); w_arg++){
int linear_index = h_arg*output_W_const2 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const2 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const2 + X_const2 - W_const2, 0);
int x_len = mx(X_const2 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const2 - pooling_radius_const2)/pooling_stride_const2, 0);
int w_end = mn(w_arg/pooling_stride_const2, pooled_W_const2 - 1);
int w_inputs = mx(mn(w_arg - padding_const2, W_const2 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const2, W_const2, C_BLOCK_GRAD2);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const2, W_const2, C_BLOCK_GRAD2);
int full_x_line = (x_len == X_const2);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const2];
__declspec(aligned(64)) int ks[K_const2];
for(int i = 0; i < X_const2; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const2, pooled_W_const2, K_const2);
int cnt = K_const2;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const2; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD2 == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const2)
for(int i = 0; i < X_const2; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD2] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD2];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD2] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD2];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const2, pooled_W_const2, K_const2);
int cnt = K_const2;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const2; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD2, X_const2, C_BLOCK_GRAD2);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD2, X_const2, C_BLOCK_GRAD2);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD2, X_const2, C_BLOCK_GRAD2);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD2, X_const2, C_BLOCK_GRAD2);
#if (C_BLOCK_GRAD2 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD2 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD2 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD2 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD2 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD2 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD2 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD2 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD2 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD2, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD2), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD2 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD2] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD2];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD2] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD2];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 : K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2] += local_scratch[ 0 : K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
void convolution_gradient_layer3(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const3);
assert(H == H_const3);
assert(W == W_const3);
assert(K == K_const3);
assert(padding == padding_const3);
assert(X == X_const3);
assert(Y == Y_const3);
assert(output_H_const3 == (H_const3 + 2*padding_const3 - Y_const3 + 1)/stride_const3);
assert(output_W_const3 == (W_const3 + 2*padding_const3 - X_const3 + 1)/stride_const3);
assert(pooled_H_const3 == ceil((output_H_const3 - pooling_radius_const3 + 1.f)/pooling_stride_const3));
assert(pooled_W_const3 == ceil((output_W_const3 - pooling_radius_const3 + 1.f)/pooling_stride_const3));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const3/C_BLOCK_GRAD3;
int Y_blocks = Y_const3/Y_BLOCK_GRAD3;
int N_blocks = N/N_BLOCK_GRAD3;
int H_blocks = output_H_const3/H_ARG_BLOCK_GRAD3;
int W_blocks = output_W_const3/W_ARG_BLOCK_GRAD3;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD3;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD3;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD3;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD3;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD3;
int size_local_scratch = Y_BLOCK_GRAD3 * X_const3 * C_BLOCK_GRAD3;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const3, Y_BLOCK_GRAD3, X_const3, C_BLOCK_GRAD3);
local_scratch[ 0 : K_const3*Y_BLOCK_GRAD3*X_const3*C_BLOCK_GRAD3] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const3*Y_BLOCK_GRAD3*X_const3*C_BLOCK_GRAD3;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD3; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD3, output_H_const3); h_arg++){
if ((y + h_arg - padding_const3 < 0) || (y + h_arg - padding_const3 >= H_const3)) continue;
int h_start = mx((h_arg + pooling_stride_const3 - pooling_radius_const3)/pooling_stride_const3, 0);
int h_end = mn(h_arg/pooling_stride_const3, pooled_H_const3 - 1);
int h_inputs = h_arg + y - padding_const3;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD3, output_W_const3); w_arg++){
int linear_index = h_arg*output_W_const3 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const3 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const3 + X_const3 - W_const3, 0);
int x_len = mx(X_const3 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const3 - pooling_radius_const3)/pooling_stride_const3, 0);
int w_end = mn(w_arg/pooling_stride_const3, pooled_W_const3 - 1);
int w_inputs = mx(mn(w_arg - padding_const3, W_const3 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const3, W_const3, C_BLOCK_GRAD3);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const3, W_const3, C_BLOCK_GRAD3);
int full_x_line = (x_len == X_const3);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const3];
__declspec(aligned(64)) int ks[K_const3];
for(int i = 0; i < X_const3; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const3, pooled_W_const3, K_const3);
int cnt = K_const3;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const3; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD3 == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const3)
for(int i = 0; i < X_const3; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD3] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD3];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD3] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD3];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const3, pooled_W_const3, K_const3);
int cnt = K_const3;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const3; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD3, X_const3, C_BLOCK_GRAD3);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD3, X_const3, C_BLOCK_GRAD3);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD3, X_const3, C_BLOCK_GRAD3);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD3, X_const3, C_BLOCK_GRAD3);
#if (C_BLOCK_GRAD3 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD3 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD3 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD3 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD3 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD3 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD3 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD3 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD3 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD3, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD3), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD3 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD3] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD3];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD3] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD3];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const3*Y_BLOCK_GRAD3*X_const3*C_BLOCK_GRAD3 : K_const3*Y_BLOCK_GRAD3*X_const3*C_BLOCK_GRAD3] += local_scratch[ 0 : K_const3*Y_BLOCK_GRAD3*X_const3*C_BLOCK_GRAD3];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
void convolution_gradient_layer4(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const4);
assert(H == H_const4);
assert(W == W_const4);
assert(K == K_const4);
assert(padding == padding_const4);
assert(X == X_const4);
assert(Y == Y_const4);
assert(output_H_const4 == (H_const4 + 2*padding_const4 - Y_const4 + 1)/stride_const4);
assert(output_W_const4 == (W_const4 + 2*padding_const4 - X_const4 + 1)/stride_const4);
assert(pooled_H_const4 == ceil((output_H_const4 - pooling_radius_const4 + 1.f)/pooling_stride_const4));
assert(pooled_W_const4 == ceil((output_W_const4 - pooling_radius_const4 + 1.f)/pooling_stride_const4));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const4/C_BLOCK_GRAD4;
int Y_blocks = Y_const4/Y_BLOCK_GRAD4;
int N_blocks = N/N_BLOCK_GRAD4;
int H_blocks = output_H_const4/H_ARG_BLOCK_GRAD4;
int W_blocks = output_W_const4/W_ARG_BLOCK_GRAD4;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD4;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD4;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD4;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD4;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD4;
int size_local_scratch = Y_BLOCK_GRAD4 * X_const4 * C_BLOCK_GRAD4;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const4, Y_BLOCK_GRAD4, X_const4, C_BLOCK_GRAD4);
local_scratch[ 0 : K_const4*Y_BLOCK_GRAD4*X_const4*C_BLOCK_GRAD4] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const4*Y_BLOCK_GRAD4*X_const4*C_BLOCK_GRAD4;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD4; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD4, output_H_const4); h_arg++){
if ((y + h_arg - padding_const4 < 0) || (y + h_arg - padding_const4 >= H_const4)) continue;
int h_start = mx((h_arg + pooling_stride_const4 - pooling_radius_const4)/pooling_stride_const4, 0);
int h_end = mn(h_arg/pooling_stride_const4, pooled_H_const4 - 1);
int h_inputs = h_arg + y - padding_const4;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD4, output_W_const4); w_arg++){
int linear_index = h_arg*output_W_const4 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const4 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const4 + X_const4 - W_const4, 0);
int x_len = mx(X_const4 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const4 - pooling_radius_const4)/pooling_stride_const4, 0);
int w_end = mn(w_arg/pooling_stride_const4, pooled_W_const4 - 1);
int w_inputs = mx(mn(w_arg - padding_const4, W_const4 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const4, W_const4, C_BLOCK_GRAD4);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const4, W_const4, C_BLOCK_GRAD4);
int full_x_line = (x_len == X_const4);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const4];
__declspec(aligned(64)) int ks[K_const4];
for(int i = 0; i < X_const4; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const4, pooled_W_const4, K_const4);
int cnt = K_const4;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const4; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD4 == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const4)
for(int i = 0; i < X_const4; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD4] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD4];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD4] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD4];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const4, pooled_W_const4, K_const4);
int cnt = K_const4;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const4; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD4, X_const4, C_BLOCK_GRAD4);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD4, X_const4, C_BLOCK_GRAD4);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD4, X_const4, C_BLOCK_GRAD4);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD4, X_const4, C_BLOCK_GRAD4);
#if (C_BLOCK_GRAD4 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD4 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD4 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD4 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD4 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD4 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD4 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD4 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD4 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD4, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD4), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD4 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD4] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD4];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD4] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD4];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const4*Y_BLOCK_GRAD4*X_const4*C_BLOCK_GRAD4 : K_const4*Y_BLOCK_GRAD4*X_const4*C_BLOCK_GRAD4] += local_scratch[ 0 : K_const4*Y_BLOCK_GRAD4*X_const4*C_BLOCK_GRAD4];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
/*
 * Backward pass for (convolution -> max-pool) layer 5, offloaded to a Xeon Phi
 * (MIC) coprocessor.  Given the gradient w.r.t. the pooled outputs
 * (D_POOLED_OUTPUTS) and the argmax index of every pooling window (ARGMAXS,
 * storing the pre-pool linear index h_arg*output_W + w_arg that won each
 * window), this accumulates:
 *   - D_FILTERS += sum over argmax positions of (coefficient * INPUTS patch)
 *   - D_INPUTS  += sum over argmax positions of (coefficient * FILTERS row)
 * where "coefficient" is the corresponding D_POOLED_OUTPUTS value.
 *
 * Work is tiled over (n_block, h_block, w_block, c_block, y_block); each OpenMP
 * task accumulates its filter gradient into a private region of SCRATCH and
 * merges it into D_FILTERS under a per-(c_block,y_block) lock at the end.
 * All layer dimensions are compile-time constants (the *_const5 macros) and
 * are cross-checked against the runtime arguments via the asserts below.
 *
 * NOTE(review): this body uses Intel-only extensions (offload pragmas, Cilk
 * array sections, KNC _mm512_* loadunpack/packstore intrinsics); it only
 * compiles with the Intel compiler targeting first-generation Xeon Phi.
 */
void convolution_gradient_layer5(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const5);
assert(H == H_const5);
assert(W == W_const5);
assert(K == K_const5);
assert(padding == padding_const5);
assert(X == X_const5);
assert(Y == Y_const5);
assert(output_H_const5 == (H_const5 + 2*padding_const5 - Y_const5 + 1)/stride_const5);
assert(output_W_const5 == (W_const5 + 2*padding_const5 - X_const5 + 1)/stride_const5);
assert(pooled_H_const5 == ceil((output_H_const5 - pooling_radius_const5 + 1.f)/pooling_stride_const5));
assert(pooled_W_const5 == ceil((output_W_const5 - pooling_radius_const5 + 1.f)/pooling_stride_const5));
/* All device buffers were allocated by an earlier offload; length(0) REUSE
 * just maps the existing device copies, no host<->device transfer here. */
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
/* Tile counts along each blocked dimension; the *_BLOCK_GRAD5 macros are
 * assumed to divide their dimensions evenly (no remainder handling below). */
int C_blocks = C_const5/C_BLOCK_GRAD5;
int Y_blocks = Y_const5/Y_BLOCK_GRAD5;
int N_blocks = N/N_BLOCK_GRAD5;
int H_blocks = output_H_const5/H_ARG_BLOCK_GRAD5;
int W_blocks = output_W_const5/W_ARG_BLOCK_GRAD5;
/* One lock per (c_block, y_block) slice of D_FILTERS, spaced 16 slots apart
 * -- presumably padding to keep each lock on its own cache line and avoid
 * false sharing (TODO confirm: sizeof(omp_lock_t)*16 >= 64 here). */
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
/* Decode the flat task index into its five block coordinates
 * (n fastest-varying last; md() is the project's modulo helper). */
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD5;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD5;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD5;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD5;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD5;
/* Elements per output channel k in the filter-gradient scratch tile. */
int size_local_scratch = Y_BLOCK_GRAD5 * X_const5 * C_BLOCK_GRAD5;
/* Private filter-gradient accumulator for this task, carved out of SCRATCH
 * and zeroed before use (Cilk array-section notation). */
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
      C_blocks, Y_blocks, K_const5, Y_BLOCK_GRAD5, X_const5, C_BLOCK_GRAD5);
local_scratch[ 0 : K_const5*Y_BLOCK_GRAD5*X_const5*C_BLOCK_GRAD5] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const5*Y_BLOCK_GRAD5*X_const5*C_BLOCK_GRAD5;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD5; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD5, output_H_const5); h_arg++){
/* Skip rows whose filter tap y would land outside the (padded) input. */
if ((y + h_arg - padding_const5 < 0) || (y + h_arg - padding_const5 >= H_const5)) continue;
/* Range of pooling-window rows that contain pre-pool row h_arg. */
int h_start = mx((h_arg + pooling_stride_const5 - pooling_radius_const5)/pooling_stride_const5, 0);
int h_end = mn(h_arg/pooling_stride_const5, pooled_H_const5 - 1);
int h_inputs = h_arg + y - padding_const5;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD5, output_W_const5); w_arg++){
/* Linear pre-pool index; this is the value stored in ARGMAXS when this
 * (h_arg, w_arg) wins a pooling window. */
int linear_index = h_arg*output_W_const5 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const5 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const5 + X_const5 - W_const5, 0);
int x_len = mx(X_const5 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
/* Range of pooling-window columns that contain pre-pool column w_arg. */
int w_start = mx((w_arg + pooling_stride_const5 - pooling_radius_const5)/pooling_stride_const5, 0);
int w_end = mn(w_arg/pooling_stride_const5, pooled_W_const5 - 1);
int w_inputs = mx(mn(w_arg - padding_const5, W_const5 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const5, W_const5, C_BLOCK_GRAD5);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const5, W_const5, C_BLOCK_GRAD5);
int full_x_line = (x_len == X_const5);
/* Compacted lists, filled by the packstore scans below: for every output
 * channel k whose pooling window chose this position, ks[] holds k and
 * pooled_outputs_coefficients[] holds its upstream gradient. */
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const5];
__declspec(aligned(64)) int ks[K_const5];
for(int i = 0; i < X_const5; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
/* Fast path: the whole X extent is in-bounds, so the x loop needs no
 * left/right clipping. */
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const5, pooled_W_const5, K_const5);
int cnt = K_const5;
/* NOTE(review): on a non-__MIC__ build cnt stays K_const5 while ks[] and
 * pooled_outputs_coefficients[] are never written, so the k2 loop below
 * would read uninitialized data -- confirm the fallback path is never
 * executed (this block runs inside #pragma offload). */
#if defined __MIC__
/* Vectorized argmax scan: compare 16 ARGMAXS entries at a time against
 * linear_index; mask-packstore compacts the matching lane values (k and
 * its gradient) to the front of ks[] / pooled_outputs_coefficients[].
 * loadunpack lo/hi pairs handle the possibly-unaligned source. */
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const5; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
/* NOTE(review): ks[k2+1] reads one past the populated entries on the
 * final iteration (and past the array itself when cnt == K_const5);
 * the value only feeds prefetch addresses, but confirm this is safe. */
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD5 == 16) && (defined __MIC__)
/* Per x tap: scratch += coeff*input (filter grad) and
 * d_input += coeff*filter (input grad), 16 channels per vector. */
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const5)
for(int i = 0; i < X_const5; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
/* Scalar/array-notation fallback of the same two accumulations. */
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD5] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD5];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD5] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD5];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
/* Clipped path: the X extent is partially out of bounds, so pointers are
 * offset by x_invalid_left and the x loop runs only over x_len taps. */
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const5, pooled_W_const5, K_const5);
int cnt = K_const5;
#if defined __MIC__
/* Same vectorized argmax-compaction scan as in the full-line path. */
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const5; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
/* Offset by x_invalid_left so x==0 corresponds to the first valid tap. */
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD5, X_const5, C_BLOCK_GRAD5);
/* NOTE(review): same ks[k2+1] one-past-the-end read as the fast path;
 * prefetch-only, but confirm. */
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD5, X_const5, C_BLOCK_GRAD5);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD5, X_const5, C_BLOCK_GRAD5);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD5, X_const5, C_BLOCK_GRAD5);
#if (C_BLOCK_GRAD5 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
/* Main loop unrolled by 2 over x (x_len_aligned = x_len rounded down to
 * even); the tail loop below handles an odd final tap. */
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD5 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD5 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD5 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD5 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD5 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD5 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD5 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD5 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
/* Odd-tap remainder. */
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD5, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD5), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD5 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD5] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD5];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD5] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD5];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
/* Merge this task's private filter gradient into the shared D_FILTERS slice
 * under the slice's lock (other tasks with the same (c_block, y_block) write
 * the same region). */
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const5*Y_BLOCK_GRAD5*X_const5*C_BLOCK_GRAD5 : K_const5*Y_BLOCK_GRAD5*X_const5*C_BLOCK_GRAD5] += local_scratch[ 0 : K_const5*Y_BLOCK_GRAD5*X_const5*C_BLOCK_GRAD5];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
/* Disabled alternative reduction of SCRATCH into D_FILTERS (note it still
 * references the *_const2 / *_GRAD2 macros from an earlier layer). */
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
void convolution_gradient_layer6(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const6);
assert(H == H_const6);
assert(W == W_const6);
assert(K == K_const6);
assert(padding == padding_const6);
assert(X == X_const6);
assert(Y == Y_const6);
assert(output_H_const6 == (H_const6 + 2*padding_const6 - Y_const6 + 1)/stride_const6);
assert(output_W_const6 == (W_const6 + 2*padding_const6 - X_const6 + 1)/stride_const6);
assert(pooled_H_const6 == ceil((output_H_const6 - pooling_radius_const6 + 1.f)/pooling_stride_const6));
assert(pooled_W_const6 == ceil((output_W_const6 - pooling_radius_const6 + 1.f)/pooling_stride_const6));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const6/C_BLOCK_GRAD6;
int Y_blocks = Y_const6/Y_BLOCK_GRAD6;
int N_blocks = N/N_BLOCK_GRAD6;
int H_blocks = output_H_const6/H_ARG_BLOCK_GRAD6;
int W_blocks = output_W_const6/W_ARG_BLOCK_GRAD6;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD6;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD6;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD6;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD6;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD6;
int size_local_scratch = Y_BLOCK_GRAD6 * X_const6 * C_BLOCK_GRAD6;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const6, Y_BLOCK_GRAD6, X_const6, C_BLOCK_GRAD6);
local_scratch[ 0 : K_const6*Y_BLOCK_GRAD6*X_const6*C_BLOCK_GRAD6] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const6*Y_BLOCK_GRAD6*X_const6*C_BLOCK_GRAD6;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD6; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD6, output_H_const6); h_arg++){
if ((y + h_arg - padding_const6 < 0) || (y + h_arg - padding_const6 >= H_const6)) continue;
int h_start = mx((h_arg + pooling_stride_const6 - pooling_radius_const6)/pooling_stride_const6, 0);
int h_end = mn(h_arg/pooling_stride_const6, pooled_H_const6 - 1);
int h_inputs = h_arg + y - padding_const6;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD6, output_W_const6); w_arg++){
int linear_index = h_arg*output_W_const6 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const6 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const6 + X_const6 - W_const6, 0);
int x_len = mx(X_const6 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const6 - pooling_radius_const6)/pooling_stride_const6, 0);
int w_end = mn(w_arg/pooling_stride_const6, pooled_W_const6 - 1);
int w_inputs = mx(mn(w_arg - padding_const6, W_const6 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const6, W_const6, C_BLOCK_GRAD6);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const6, W_const6, C_BLOCK_GRAD6);
int full_x_line = (x_len == X_const6);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const6];
__declspec(aligned(64)) int ks[K_const6];
for(int i = 0; i < X_const6; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const6, pooled_W_const6, K_const6);
int cnt = K_const6;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const6; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD6 == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const6)
for(int i = 0; i < X_const6; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD6] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD6];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD6] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD6];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const6, pooled_W_const6, K_const6);
int cnt = K_const6;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const6; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD6, X_const6, C_BLOCK_GRAD6);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD6, X_const6, C_BLOCK_GRAD6);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD6, X_const6, C_BLOCK_GRAD6);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD6, X_const6, C_BLOCK_GRAD6);
#if (C_BLOCK_GRAD6 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD6 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD6 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD6 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD6 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD6 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD6 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD6 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD6 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD6, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD6), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD6 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD6] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD6];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD6] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD6];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const6*Y_BLOCK_GRAD6*X_const6*C_BLOCK_GRAD6 : K_const6*Y_BLOCK_GRAD6*X_const6*C_BLOCK_GRAD6] += local_scratch[ 0 : K_const6*Y_BLOCK_GRAD6*X_const6*C_BLOCK_GRAD6];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
// Backward pass for convolution layer 7 (fixed sizes come from the *_const7 /
// *_GRAD7 compile-time constants), fused with its max-pooling stage.
//
// For every pre-pool output element (n_tot, h_arg, w_arg) it scans, via ARGMAXS,
// which pooling windows selected that element as their argmax; for each matching
// filter k it accumulates
//   - the filter gradient  (input patch  * d_pooled_output) into a per-task
//     scratch slab inside SCRATCH, and
//   - the input  gradient  (filter patch * d_pooled_output) into D_INPUTS.
// Each task's scratch slab is then reduced into D_FILTERS under a per-
// (c_block, y_block) OpenMP lock.
//
// Parameters: N..X describe the layer geometry and are only validated against
// the compiled-in constants; INPUTS/FILTERS/ARGMAXS/D_POOLED_OUTPUTS are read,
// D_INPUTS/D_FILTERS are accumulated into, SCRATCH is task-local workspace.
// All buffers are pre-existing device allocations (length(0) REUSE) and the
// whole body executes on the MIC coprocessor via #pragma offload, using
// first-generation Xeon Phi (KNC) intrinsics and Cilk Plus array notation.
void convolution_gradient_layer7(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
// Runtime arguments must match the constants this specialization was compiled for.
assert(C == C_const7);
assert(H == H_const7);
assert(W == W_const7);
assert(K == K_const7);
assert(padding == padding_const7);
assert(X == X_const7);
assert(Y == Y_const7);
assert(output_H_const7 == (H_const7 + 2*padding_const7 - Y_const7 + 1)/stride_const7);
assert(output_W_const7 == (W_const7 + 2*padding_const7 - X_const7 + 1)/stride_const7);
assert(pooled_H_const7 == ceil((output_H_const7 - pooling_radius_const7 + 1.f)/pooling_stride_const7));
assert(pooled_W_const7 == ceil((output_W_const7 - pooling_radius_const7 + 1.f)/pooling_stride_const7));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
// Tiling factors: channels, filter rows, batch, and output rows/cols are each
// processed in fixed-size blocks.
int C_blocks = C_const7/C_BLOCK_GRAD7;
int Y_blocks = Y_const7/Y_BLOCK_GRAD7;
int N_blocks = N/N_BLOCK_GRAD7;
int H_blocks = output_H_const7/H_ARG_BLOCK_GRAD7;
int W_blocks = output_W_const7/W_ARG_BLOCK_GRAD7;
// One lock per (c_block, y_block) D_FILTERS region; locks are strided 16
// elements apart, presumably so each lives on its own cache line to avoid
// false sharing — confirm sizeof(omp_lock_t) assumption.
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
// Each "outer" task owns one (n_block, h_block, w_block, c_block, y_block)
// tile; the mixed-radix index is decoded below (md = modulo helper).
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD7;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD7;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD7;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD7;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD7;
// Size of one filter-k slice of the scratch slab / filter bank.
int size_local_scratch = Y_BLOCK_GRAD7 * X_const7 * C_BLOCK_GRAD7;
// Per-task filter-gradient accumulator inside SCRATCH, zeroed before use.
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const7, Y_BLOCK_GRAD7, X_const7, C_BLOCK_GRAD7);
local_scratch[ 0 : K_const7*Y_BLOCK_GRAD7*X_const7*C_BLOCK_GRAD7] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const7*Y_BLOCK_GRAD7*X_const7*C_BLOCK_GRAD7;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD7; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD7, output_H_const7); h_arg++){
// Skip rows where filter row y lands outside the (unpadded) input image.
if ((y + h_arg - padding_const7 < 0) || (y + h_arg - padding_const7 >= H_const7)) continue;
// Range of pooled rows whose window contains output row h_arg.
int h_start = mx((h_arg + pooling_stride_const7 - pooling_radius_const7)/pooling_stride_const7, 0);
int h_end = mn(h_arg/pooling_stride_const7, pooled_H_const7 - 1);
int h_inputs = h_arg + y - padding_const7;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD7, output_W_const7); w_arg++){
// Flat (h_arg, w_arg) id, matched against ARGMAXS entries below.
int linear_index = h_arg*output_W_const7 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const7 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const7 + X_const7 - W_const7, 0);
int x_len = mx(X_const7 - x_invalid_left - x_invalid_right, 0);
// x_len rounded down to a multiple of 2 for the 2x-unrolled vector loop.
int x_len_aligned = x_len / 2 * 2;
// Range of pooled cols whose window contains output col w_arg.
int w_start = mx((w_arg + pooling_stride_const7 - pooling_radius_const7)/pooling_stride_const7, 0);
int w_end = mn(w_arg/pooling_stride_const7, pooled_W_const7 - 1);
int w_inputs = mx(mn(w_arg - padding_const7, W_const7 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const7, W_const7, C_BLOCK_GRAD7);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const7, W_const7, C_BLOCK_GRAD7);
// When the full filter width is valid we can take the fast path (no edge clipping).
int full_x_line = (x_len == X_const7);
// Compacted outputs of the argmax scan: matching filter ids and their
// pooled-output gradients, 64-byte aligned for the masked packstores.
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const7];
__declspec(aligned(64)) int ks[K_const7];
for(int i = 0; i < X_const7; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const7, pooled_W_const7, K_const7);
// NOTE(review): without __MIC__, cnt stays K_const7 but ks[] and
// pooled_outputs_coefficients[] are never filled, so the fallback
// path reads uninitialized stack data. This body only runs under
// offload to MIC in practice — confirm the fallback is dead code.
int cnt = K_const7;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
// Vectorized argmax scan, 16 filters per iteration: lanes whose
// ARGMAXS entry equals linear_index are stream-compacted (masked
// packstore) into ks[] / pooled_outputs_coefficients[]; cnt counts
// the matches. loadunpacklo/hi handle unaligned 64-byte loads.
for (int k = 0; k < K_const7; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
// NOTE(review): ks[k2+1] below reads one slot past the last valid
// entry when k2 == cnt-1 (prefetch-only address, but potentially
// uninitialized, and out of bounds if cnt == K_const7) — confirm.
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
// *_next pointers feed software prefetch for the next k2 iteration.
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD7 == 16) && (defined __MIC__)
// Full-width path: one 16-float channel vector per x position,
// accumulating scratch += coeff*input and d_input += coeff*filter.
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const7)
for(int i = 0; i < X_const7; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
// Scalar/array-notation fallback (Cilk Plus sections).
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD7] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD7];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD7] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD7];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// Edge path: the filter window is clipped horizontally, so only
// x_len of X_const7 positions are touched, offset by x_invalid_left.
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const7, pooled_W_const7, K_const7);
int cnt = K_const7;
#if defined __MIC__
// Same argmax stream-compaction as the full-width branch above.
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const7; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
// NOTE(review): same ks[k2+1] one-past-the-end prefetch read as in
// the full-width branch — confirm.
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
// Scratch/filter pointers skip the clipped-off left columns.
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD7, X_const7, C_BLOCK_GRAD7);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD7, X_const7, C_BLOCK_GRAD7);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD7, X_const7, C_BLOCK_GRAD7);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD7, X_const7, C_BLOCK_GRAD7);
#if (C_BLOCK_GRAD7 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
// Main loop unrolled by 2 over x; remainder handled below.
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD7 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD7 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD7 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD7 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD7 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD7 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD7 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD7 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
// Remainder: at most one odd x position.
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD7, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD7), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD7 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD7] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD7];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD7] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD7];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
// Reduce this task's filter-gradient slab into the shared D_FILTERS region
// under the (c_block, y_block) lock (stride-16 lock indexing as above).
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const7*Y_BLOCK_GRAD7*X_const7*C_BLOCK_GRAD7 : K_const7*Y_BLOCK_GRAD7*X_const7*C_BLOCK_GRAD7] += local_scratch[ 0 : K_const7*Y_BLOCK_GRAD7*X_const7*C_BLOCK_GRAD7];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
void convolution_gradient_layer8(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const8);
assert(H == H_const8);
assert(W == W_const8);
assert(K == K_const8);
assert(padding == padding_const8);
assert(X == X_const8);
assert(Y == Y_const8);
assert(output_H_const8 == (H_const8 + 2*padding_const8 - Y_const8 + 1)/stride_const8);
assert(output_W_const8 == (W_const8 + 2*padding_const8 - X_const8 + 1)/stride_const8);
assert(pooled_H_const8 == ceil((output_H_const8 - pooling_radius_const8 + 1.f)/pooling_stride_const8));
assert(pooled_W_const8 == ceil((output_W_const8 - pooling_radius_const8 + 1.f)/pooling_stride_const8));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const8/C_BLOCK_GRAD8;
int Y_blocks = Y_const8/Y_BLOCK_GRAD8;
int N_blocks = N/N_BLOCK_GRAD8;
int H_blocks = output_H_const8/H_ARG_BLOCK_GRAD8;
int W_blocks = output_W_const8/W_ARG_BLOCK_GRAD8;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD8;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD8;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD8;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD8;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD8;
int size_local_scratch = Y_BLOCK_GRAD8 * X_const8 * C_BLOCK_GRAD8;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const8, Y_BLOCK_GRAD8, X_const8, C_BLOCK_GRAD8);
local_scratch[ 0 : K_const8*Y_BLOCK_GRAD8*X_const8*C_BLOCK_GRAD8] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const8*Y_BLOCK_GRAD8*X_const8*C_BLOCK_GRAD8;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD8; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD8, output_H_const8); h_arg++){
if ((y + h_arg - padding_const8 < 0) || (y + h_arg - padding_const8 >= H_const8)) continue;
int h_start = mx((h_arg + pooling_stride_const8 - pooling_radius_const8)/pooling_stride_const8, 0);
int h_end = mn(h_arg/pooling_stride_const8, pooled_H_const8 - 1);
int h_inputs = h_arg + y - padding_const8;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD8, output_W_const8); w_arg++){
int linear_index = h_arg*output_W_const8 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const8 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const8 + X_const8 - W_const8, 0);
int x_len = mx(X_const8 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const8 - pooling_radius_const8)/pooling_stride_const8, 0);
int w_end = mn(w_arg/pooling_stride_const8, pooled_W_const8 - 1);
int w_inputs = mx(mn(w_arg - padding_const8, W_const8 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const8, W_const8, C_BLOCK_GRAD8);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const8, W_const8, C_BLOCK_GRAD8);
int full_x_line = (x_len == X_const8);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const8];
__declspec(aligned(64)) int ks[K_const8];
for(int i = 0; i < X_const8; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const8, pooled_W_const8, K_const8);
int cnt = K_const8;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const8; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD8 == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const8)
for(int i = 0; i < X_const8; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD8] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD8];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD8] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD8];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const8, pooled_W_const8, K_const8);
int cnt = K_const8;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const8; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD8, X_const8, C_BLOCK_GRAD8);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD8, X_const8, C_BLOCK_GRAD8);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD8, X_const8, C_BLOCK_GRAD8);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD8, X_const8, C_BLOCK_GRAD8);
#if (C_BLOCK_GRAD8 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD8 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD8 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD8 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD8 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD8 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD8 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD8 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD8 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD8, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD8), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD8 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD8] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD8];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD8] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD8];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const8*Y_BLOCK_GRAD8*X_const8*C_BLOCK_GRAD8 : K_const8*Y_BLOCK_GRAD8*X_const8*C_BLOCK_GRAD8] += local_scratch[ 0 : K_const8*Y_BLOCK_GRAD8*X_const8*C_BLOCK_GRAD8];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
void convolution_gradient_layer9(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const9);
assert(H == H_const9);
assert(W == W_const9);
assert(K == K_const9);
assert(padding == padding_const9);
assert(X == X_const9);
assert(Y == Y_const9);
assert(output_H_const9 == (H_const9 + 2*padding_const9 - Y_const9 + 1)/stride_const9);
assert(output_W_const9 == (W_const9 + 2*padding_const9 - X_const9 + 1)/stride_const9);
assert(pooled_H_const9 == ceil((output_H_const9 - pooling_radius_const9 + 1.f)/pooling_stride_const9));
assert(pooled_W_const9 == ceil((output_W_const9 - pooling_radius_const9 + 1.f)/pooling_stride_const9));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const9/C_BLOCK_GRAD9;
int Y_blocks = Y_const9/Y_BLOCK_GRAD9;
int N_blocks = N/N_BLOCK_GRAD9;
int H_blocks = output_H_const9/H_ARG_BLOCK_GRAD9;
int W_blocks = output_W_const9/W_ARG_BLOCK_GRAD9;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD9;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD9;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD9;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD9;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD9;
int size_local_scratch = Y_BLOCK_GRAD9 * X_const9 * C_BLOCK_GRAD9;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const9, Y_BLOCK_GRAD9, X_const9, C_BLOCK_GRAD9);
local_scratch[ 0 : K_const9*Y_BLOCK_GRAD9*X_const9*C_BLOCK_GRAD9] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const9*Y_BLOCK_GRAD9*X_const9*C_BLOCK_GRAD9;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD9; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD9, output_H_const9); h_arg++){
if ((y + h_arg - padding_const9 < 0) || (y + h_arg - padding_const9 >= H_const9)) continue;
int h_start = mx((h_arg + pooling_stride_const9 - pooling_radius_const9)/pooling_stride_const9, 0);
int h_end = mn(h_arg/pooling_stride_const9, pooled_H_const9 - 1);
int h_inputs = h_arg + y - padding_const9;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD9, output_W_const9); w_arg++){
int linear_index = h_arg*output_W_const9 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const9 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const9 + X_const9 - W_const9, 0);
int x_len = mx(X_const9 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const9 - pooling_radius_const9)/pooling_stride_const9, 0);
int w_end = mn(w_arg/pooling_stride_const9, pooled_W_const9 - 1);
int w_inputs = mx(mn(w_arg - padding_const9, W_const9 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const9, W_const9, C_BLOCK_GRAD9);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const9, W_const9, C_BLOCK_GRAD9);
int full_x_line = (x_len == X_const9);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const9];
__declspec(aligned(64)) int ks[K_const9];
for(int i = 0; i < X_const9; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const9, pooled_W_const9, K_const9);
int cnt = K_const9;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const9; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD9 == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const9)
for(int i = 0; i < X_const9; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD9] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD9];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD9] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD9];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const9, pooled_W_const9, K_const9);
int cnt = K_const9;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const9; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD9, X_const9, C_BLOCK_GRAD9);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD9, X_const9, C_BLOCK_GRAD9);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD9, X_const9, C_BLOCK_GRAD9);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD9, X_const9, C_BLOCK_GRAD9);
#if (C_BLOCK_GRAD9 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD9 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD9 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD9 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD9 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD9 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD9 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD9 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD9 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD9, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD9), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD9 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD9] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD9];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD9] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD9];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const9*Y_BLOCK_GRAD9*X_const9*C_BLOCK_GRAD9 : K_const9*Y_BLOCK_GRAD9*X_const9*C_BLOCK_GRAD9] += local_scratch[ 0 : K_const9*Y_BLOCK_GRAD9*X_const9*C_BLOCK_GRAD9];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
void convolution_gradient_layer10(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const10);
assert(H == H_const10);
assert(W == W_const10);
assert(K == K_const10);
assert(padding == padding_const10);
assert(X == X_const10);
assert(Y == Y_const10);
assert(output_H_const10 == (H_const10 + 2*padding_const10 - Y_const10 + 1)/stride_const10);
assert(output_W_const10 == (W_const10 + 2*padding_const10 - X_const10 + 1)/stride_const10);
assert(pooled_H_const10 == ceil((output_H_const10 - pooling_radius_const10 + 1.f)/pooling_stride_const10));
assert(pooled_W_const10 == ceil((output_W_const10 - pooling_radius_const10 + 1.f)/pooling_stride_const10));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const10/C_BLOCK_GRAD10;
int Y_blocks = Y_const10/Y_BLOCK_GRAD10;
int N_blocks = N/N_BLOCK_GRAD10;
int H_blocks = output_H_const10/H_ARG_BLOCK_GRAD10;
int W_blocks = output_W_const10/W_ARG_BLOCK_GRAD10;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD10;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD10;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD10;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD10;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD10;
int size_local_scratch = Y_BLOCK_GRAD10 * X_const10 * C_BLOCK_GRAD10;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const10, Y_BLOCK_GRAD10, X_const10, C_BLOCK_GRAD10);
local_scratch[ 0 : K_const10*Y_BLOCK_GRAD10*X_const10*C_BLOCK_GRAD10] = 0.f;
float * restrict FILTERS_pointer_base = FILTERS + (c_block * Y_blocks + y_block) * K_const10*Y_BLOCK_GRAD10*X_const10*C_BLOCK_GRAD10;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD10; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD10, output_H_const10); h_arg++){
if ((y + h_arg - padding_const10 < 0) || (y + h_arg - padding_const10 >= H_const10)) continue;
int h_start = mx((h_arg + pooling_stride_const10 - pooling_radius_const10)/pooling_stride_const10, 0);
int h_end = mn(h_arg/pooling_stride_const10, pooled_H_const10 - 1);
int h_inputs = h_arg + y - padding_const10;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD10, output_W_const10); w_arg++){
int linear_index = h_arg*output_W_const10 + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const10 - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const10 + X_const10 - W_const10, 0);
int x_len = mx(X_const10 - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const10 - pooling_radius_const10)/pooling_stride_const10, 0);
int w_end = mn(w_arg/pooling_stride_const10, pooled_W_const10 - 1);
int w_inputs = mx(mn(w_arg - padding_const10, W_const10 - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const10, W_const10, C_BLOCK_GRAD10);
float * restrict D_INPUTS_pointer_base = D_INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const10, W_const10, C_BLOCK_GRAD10);
int full_x_line = (x_len == X_const10);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const10];
__declspec(aligned(64)) int ks[K_const10];
for(int i = 0; i < X_const10; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const10, pooled_W_const10, K_const10);
int cnt = K_const10;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const10; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
#if (C_BLOCK_GRAD10 == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const10)
for(int i = 0; i < X_const10; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + i*16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
FILTERS_pointer += 16;
D_INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD10] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD10];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD10] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD10];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const10, pooled_W_const10, K_const10);
int cnt = K_const10;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const10; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD10, X_const10, C_BLOCK_GRAD10);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD10, X_const10, C_BLOCK_GRAD10);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
float * restrict D_INPUTS_pointer = D_INPUTS_pointer_base;
// float * restrict FILTERS_pointer = FILTERS_pointer_base + k*size_local_scratch;
// float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ks[k2+1]*size_local_scratch;
float * restrict FILTERS_pointer = FILTERS_pointer_base + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD10, X_const10, C_BLOCK_GRAD10);
float * restrict FILTERS_pointer_next = FILTERS_pointer_base + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD10, X_const10, C_BLOCK_GRAD10);
#if (C_BLOCK_GRAD10 == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16 + 16), _MM_HINT_T0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD10 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_0 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input_1 = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD10 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD10 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_0 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters_1 = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD10 + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
v_d_input_0 = _mm512_fmadd_ps(v_filters_0, v_pooled_outputs_coefficient, v_d_input_0);
v_d_input_1 = _mm512_fmadd_ps(v_filters_1, v_pooled_outputs_coefficient, v_d_input_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD10 ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD10 + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD10 ), v_d_input_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD10 + 16), v_d_input_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(FILTERS_pointer_next + x*16), _MM_HINT_T0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_d_input = _mm512_extload_ps(D_INPUTS_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_filters = _mm512_extload_ps(FILTERS_pointer + x*C_BLOCK_GRAD10, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
v_d_input = _mm512_fmadd_ps(v_filters, v_pooled_outputs_coefficient, v_d_input);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD10), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(D_INPUTS_pointer + x*C_BLOCK_GRAD10 ), v_d_input, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD10] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD10];
D_INPUTS_pointer[0 : x_len*C_BLOCK_GRAD10] += pooled_outputs_coefficient * FILTERS_pointer[0 : x_len*C_BLOCK_GRAD10];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const10*Y_BLOCK_GRAD10*X_const10*C_BLOCK_GRAD10 : K_const10*Y_BLOCK_GRAD10*X_const10*C_BLOCK_GRAD10] += local_scratch[ 0 : K_const10*Y_BLOCK_GRAD10*X_const10*C_BLOCK_GRAD10];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const2*Y_BLOCK_GRAD2*X_const2*C_BLOCK_GRAD2 + inner*X_const2*C_BLOCK_GRAD2;
float *d_filters_pointer = D_FILTERS + inner*X_const2*C_BLOCK_GRAD2;
d_filters_pointer[0: X_const2*C_BLOCK_GRAD2] += local_scratch_pointer[0 : X_const2*C_BLOCK_GRAD2];
}
}
#endif
} // pragma offload
}
// INPUTS data structure [N, C/C_BLOCK, H, W, C_BLOCK]
// D_FILTERS/FILTERS data structure [K, Y, X, C]
// ARGMAXS/OUTPUTS/D_POOLED_OUTPUTS data structure [N, pooled_H, pooled_W, K]
// void convolution_gradient_layer1(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(padding == padding_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == ceil((output_H_const - pooling_radius_const + 1.f)/pooling_stride_const));
// assert(pooled_W_const == ceil((output_W_const - pooling_radius_const + 1.f)/pooling_stride_const));
// #pragma offload target(mic:MIC_DEV) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(D_POOLED_OUTPUTS:length(0) REUSE) \
// in(D_FILTERS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int C_blocks = C_const/C_BLOCK_GRAD;
// int Y_blocks = Y_const/Y_BLOCK_GRAD;
// int N_blocks = N/N_BLOCK_GRAD;
// int H_blocks = output_H_const/H_ARG_BLOCK_GRAD;
// int W_blocks = output_W_const/W_ARG_BLOCK_GRAD;
// SCRATCH[0 : omp_get_max_threads()*C_blocks*Y_blocks*K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD] = 0.f;
// #pragma omp parallel for \
// default(none) \
// schedule(dynamic) \
// shared(N, INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks)
// for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
// int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
// int n = n_block*N_BLOCK_GRAD;
// int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
// int h = h_block * H_ARG_BLOCK_GRAD;
// int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
// int w = w_block * W_ARG_BLOCK_GRAD;
// int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
// int c = c_block * C_BLOCK_GRAD;
// int y_block = md(outer, Y_blocks);
// int y = y_block * Y_BLOCK_GRAD;
// assert(outer == ti5(n_block, h_block, w_block, c_block, y_block, H_blocks, W_blocks, C_blocks, Y_blocks));
// float *restrict local_scratch = SCRATCH + ti7(omp_get_thread_num(), c_block, y_block, 0, 0, 0, 0,
// C_blocks, Y_blocks, K_const, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD);
// // for each element in the pre-pooled outputs, find out whether it is an argmax
// for (int n_tot = n; n_tot < n + N_BLOCK_GRAD; n_tot++){
// for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD, output_H_const); h_arg++){
// for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD, output_W_const); w_arg++){
// int linear_index = h_arg*output_W_const + w_arg;
// int h_start = mx((h_arg + pooling_stride_const - pooling_radius_const)/pooling_stride_const, 0); // ceil((h_arg - pooling_radius + 1)/pooling_stride)
// int w_start = mx((w_arg + pooling_stride_const - pooling_radius_const)/pooling_stride_const, 0);
// int h_end = mn(h_arg/pooling_stride_const, pooled_H_const - 1); // floor(h_arg/pooling_stride_const)
// int w_end = mn(w_arg/pooling_stride_const, pooled_W_const - 1);
// // scan over all windows in which this element appears
// for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
// for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
// // for (int h_pooled = 0; h_pooled < pooled_H_const; h_pooled++){
// // for (int w_pooled = 0; w_pooled < pooled_W_const; w_pooled++){
// for (int k = 0; k < K_const; k++){
// int pooled_index = ti(n_tot, h_pooled, w_pooled, k, pooled_H_const, pooled_W_const, K_const);
// // if this (h_arg, w_arg) is the argmax of the window
// if (ARGMAXS[pooled_index] == linear_index){
// float pooled_outputs_coefficient = D_POOLED_OUTPUTS[pooled_index];
// // figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
// int x_invalid_left = mx(padding_const - w_arg, 0);
// int x_invalid_right = mx(w_arg - padding_const + X_const - W_const, 0);
// int x_len = mx(X_const - x_invalid_left - x_invalid_right, 0);
// int w_inputs = mx(mn(w_arg - padding_const, W_const - 1), 0);
// for (int yy = 0; yy < Y_BLOCK_GRAD; yy++){
// if ((y + yy + h_arg - padding_const >= 0) && (y + yy + h_arg - padding_const < H_const)){
// int h_inputs = h_arg + y + yy - padding_const;
// local_scratch[ti(k, yy, x_invalid_left, 0,
// Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD) : x_len*C_BLOCK_GRAD] +=
// pooled_outputs_coefficient *
// INPUTS[ti5(n_tot, c_block, h_inputs, w_inputs, 0,
// C_blocks, H_const, W_const, C_BLOCK_GRAD) : x_len*C_BLOCK_GRAD];
// } // if
// } //yy
// } // if
// } // k
// } // w_pooled
// } // h_pooled
// } // w_arg
// } // h_arg
// } // nn
// } // outer
// for (int thread = 0; thread < omp_get_max_threads(); thread++){
// #pragma omp parallel for
// for (int k = 0; k < K_const; k++){
// for (int c_block = 0; c_block < C_blocks; c_block++){
// int c = c_block*C_BLOCK_GRAD;
// for (int y = 0; y < Y_const; y++){
// int y_block = y / Y_BLOCK_GRAD;
// int yy = md(y, Y_BLOCK_GRAD);
// for (int x = 0; x < X_const; x++){
// D_FILTERS[ti(k, y, x, c, Y_const, X_const, C_const) : C_BLOCK_GRAD] +=
// SCRATCH[ti7(thread, c_block, y_block, k, yy, x, 0,
// C_blocks, Y_blocks, K_const, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD) : C_BLOCK_GRAD];
// }
// }
// }
// }
// }
// } // pragma offload
// }
void convolution_gradient_layer1(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
assert(C == C_const);
assert(H == H_const);
assert(W == W_const);
assert(K == K_const);
assert(padding == padding_const);
assert(X == X_const);
assert(Y == Y_const);
assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
assert(pooled_H_const == ceil((output_H_const - pooling_radius_const + 1.f)/pooling_stride_const));
assert(pooled_W_const == ceil((output_W_const - pooling_radius_const + 1.f)/pooling_stride_const));
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(D_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(ARGMAXS:length(0) REUSE) \
in(D_POOLED_OUTPUTS:length(0) REUSE) \
in(D_FILTERS:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int C_blocks = C_const/C_BLOCK_GRAD;
int Y_blocks = Y_const/Y_BLOCK_GRAD;
int N_blocks = N/N_BLOCK_GRAD;
int H_blocks = output_H_const/H_ARG_BLOCK_GRAD;
int W_blocks = output_W_const/W_ARG_BLOCK_GRAD;
omp_lock_t writelock[C_blocks*Y_blocks*16];
for(int i = 0; i < C_blocks*Y_blocks; i++)
omp_init_lock(&writelock[16*i]);
// double st = omp_get_wtime();
#pragma omp parallel for \
default(none) \
schedule(dynamic) \
shared(N, INPUTS, D_INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH, C_blocks, Y_blocks, N_blocks, H_blocks, W_blocks, writelock)
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks * C_blocks * Y_blocks; outer++){
int n_block = outer / (H_blocks * W_blocks * C_blocks * Y_blocks);
int n = n_block*N_BLOCK_GRAD;
int h_block = md(outer, H_blocks * W_blocks * C_blocks * Y_blocks) / (W_blocks * C_blocks * Y_blocks);
int h = h_block * H_ARG_BLOCK_GRAD;
int w_block = md(outer, W_blocks * C_blocks * Y_blocks) / (C_blocks * Y_blocks);
int w = w_block * W_ARG_BLOCK_GRAD;
int c_block = md(outer, C_blocks * Y_blocks) / (Y_blocks);
int c = c_block * C_BLOCK_GRAD;
int y_block = md(outer, Y_blocks);
int y = y_block * Y_BLOCK_GRAD;
int size_local_scratch = Y_BLOCK_GRAD * X_const * C_BLOCK_GRAD;
float *restrict local_scratch = SCRATCH + ti7(n_block*H_blocks*W_blocks + h_block*W_blocks + w_block, c_block, y_block, 0, 0, 0, 0,
C_blocks, Y_blocks, K_const, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD);
local_scratch[ 0 : K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD] = 0.f;
// for each element in the pre-pooled outputs, find out whether it is an argmax
for (int n_tot = n; n_tot < n + N_BLOCK_GRAD; n_tot++){
for (int h_arg = h; h_arg < mn(h + H_ARG_BLOCK_GRAD, output_H_const); h_arg++){
if ((y + h_arg - padding_const < 0) || (y + h_arg - padding_const >= H_const)) continue;
int h_start = mx((h_arg + pooling_stride_const - pooling_radius_const)/pooling_stride_const, 0);
int h_end = mn(h_arg/pooling_stride_const, pooled_H_const - 1);
int h_inputs = h_arg + y - padding_const;
for (int w_arg = w; w_arg < mn(w + W_ARG_BLOCK_GRAD, output_W_const); w_arg++){
int linear_index = h_arg*output_W_const + w_arg;
// figure out the width of the window that is valid (i.e, not out-of-bounds for INPUTS)
int x_invalid_left = mx(padding_const - w_arg, 0);
int x_invalid_right = mx(w_arg - padding_const + X_const - W_const, 0);
int x_len = mx(X_const - x_invalid_left - x_invalid_right, 0);
int x_len_aligned = x_len / 2 * 2;
int w_start = mx((w_arg + pooling_stride_const - pooling_radius_const)/pooling_stride_const, 0);
int w_end = mn(w_arg/pooling_stride_const, pooled_W_const - 1);
int w_inputs = mx(mn(w_arg - padding_const, W_const - 1), 0);
float * restrict INPUTS_pointer_base = INPUTS + ti5(n_tot, c_block, h_inputs, w_inputs, 0, C_blocks, H_const, W_const, C_BLOCK_GRAD);
int full_x_line = (x_len == X_const);
__declspec(aligned(64)) float pooled_outputs_coefficients[K_const];
__declspec(aligned(64)) int ks[K_const];
for(int i = 0; i < X_const; i++) _mm_prefetch((char *)(INPUTS_pointer_base + i*16), _MM_HINT_T0);
if(full_x_line)
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const, pooled_W_const, K_const);
int cnt = K_const;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + k*size_local_scratch;
float * restrict local_scratch_pointer_next = local_scratch + ks[k2+1]*size_local_scratch;
float * restrict INPUTS_pointer = INPUTS_pointer_base;
#if (C_BLOCK_GRAD == 16) && (defined __MIC__)
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
#pragma unroll (X_const)
for(int i = 0; i < X_const; i++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + i*16), _MM_HINT_ET0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
_mm512_extstore_ps((float *)(local_scratch_pointer), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
local_scratch_pointer += 16;
INPUTS_pointer += 16;
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD];
#endif
} // k
} // w_pooled
} // h_pooled
}
else
{
// scan over all windows in which this element appears
for (int h_pooled = h_start; h_pooled <= h_end; h_pooled++){
for (int w_pooled = w_start; w_pooled <= w_end; w_pooled++){
int pooled_index = ti(n_tot, h_pooled, w_pooled, 0, pooled_H_const, pooled_W_const, K_const);
int cnt = K_const;
#if defined __MIC__
__m512i v_linear_index = _mm512_set1_epi32(linear_index);
cnt = 0;
__m512i v_0to15 = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
for (int k = 0; k < K_const; k+=16){
__m512i v_k = _mm512_add_epi32(v_0to15, _mm512_set1_epi32(k));
__m512i v_ARGMAXS = _mm512_undefined_epi32();
__m512 v_D_POOLED_OUTPUTS = _mm512_undefined_ps();
v_ARGMAXS = _mm512_loadunpacklo_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k);
v_ARGMAXS = _mm512_loadunpackhi_epi32(v_ARGMAXS, ARGMAXS + pooled_index + k + 16);
v_D_POOLED_OUTPUTS = _mm512_loadunpacklo_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k);
v_D_POOLED_OUTPUTS = _mm512_loadunpackhi_ps(v_D_POOLED_OUTPUTS, D_POOLED_OUTPUTS + pooled_index + k + 16);
__mmask16 m = _mm512_cmpeq_epi32_mask(v_ARGMAXS, v_linear_index);
_mm512_mask_packstorelo_ps((float *)(pooled_outputs_coefficients + cnt), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorehi_ps((float *)(pooled_outputs_coefficients + cnt + 16), m, v_D_POOLED_OUTPUTS);
_mm512_mask_packstorelo_epi32((int *)(ks + cnt), m, v_k);
_mm512_mask_packstorehi_epi32((int *)(ks + cnt + 16), m, v_k);
cnt += _mm_countbits_32(m);
}
#endif
for (int k2 = 0; k2 < cnt; k2++){
// if this (h_arg, w_arg) is the argmax of the window
int k = ks[k2];
float pooled_outputs_coefficient = pooled_outputs_coefficients[k2];
float * restrict local_scratch_pointer = local_scratch + ti(k, 0, x_invalid_left, 0, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD);
float * restrict local_scratch_pointer_next = local_scratch + ti(ks[k2+1], 0, 0, 0, Y_BLOCK_GRAD, X_const, C_BLOCK_GRAD);
float * restrict INPUTS_pointer = INPUTS_pointer_base;
#if (C_BLOCK_GRAD == 16) && defined __MIC__
__m512 v_pooled_outputs_coefficient = _mm512_set1_ps(pooled_outputs_coefficient);
for(int x = 0; x < x_len_aligned; x+=2)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
_mm_prefetch((char *)(local_scratch_pointer_next + x*16 + 16), _MM_HINT_ET0);
__m512 v_input_0 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_input_1 = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_0 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch_1 = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch_0 = _mm512_fmadd_ps(v_input_0, v_pooled_outputs_coefficient, v_scratch_0);
v_scratch_1 = _mm512_fmadd_ps(v_input_1, v_pooled_outputs_coefficient, v_scratch_1);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD ), v_scratch_0, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD + 16), v_scratch_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
for(int x = x_len_aligned; x < x_len; x++)
{
_mm_prefetch((char *)(local_scratch_pointer_next + x*16), _MM_HINT_ET0);
__m512 v_input = _mm512_extload_ps(INPUTS_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 v_scratch = _mm512_extload_ps(local_scratch_pointer + x*C_BLOCK_GRAD, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
v_scratch = _mm512_fmadd_ps(v_input, v_pooled_outputs_coefficient, v_scratch);
_mm512_extstore_ps((float *)(local_scratch_pointer + x*C_BLOCK_GRAD), v_scratch, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
}
#else
local_scratch_pointer[0 : x_len*C_BLOCK_GRAD] += pooled_outputs_coefficient * INPUTS_pointer[0 : x_len*C_BLOCK_GRAD];
#endif
} // k
} // w_pooled
} // h_pooled
}
} // w_arg
} // h_arg
} // nn
omp_set_lock(&writelock[16*c_block*y_block]);
{
D_FILTERS[(c_block * Y_blocks + y_block) * K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD : K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD] += local_scratch[ 0 : K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD];
}
omp_unset_lock(&writelock[16*c_block*y_block]);
} // outer
// double end = omp_get_wtime();
// printf("Time first loop = %.5lf\n", (end - st));
#if 0
for (int outer = 0; outer < N_blocks * H_blocks * W_blocks ; outer++){
#pragma omp parallel for schedule(dynamic)
for(int inner = 0; inner < C_blocks*Y_blocks*K_const*Y_BLOCK_GRAD; inner++)
{
float *local_scratch_pointer = SCRATCH + outer*C_blocks*Y_blocks*K_const*Y_BLOCK_GRAD*X_const*C_BLOCK_GRAD + inner*X_const*C_BLOCK_GRAD;
float *d_filters_pointer = D_FILTERS + inner*X_const*C_BLOCK_GRAD;
d_filters_pointer[0: X_const*C_BLOCK_GRAD] += local_scratch_pointer[0 : X_const*C_BLOCK_GRAD];
}
}
#endif
} // pragma offload
}
// local filtering after interleaving N and K and blocking C, before intrinsics
// Forward pass of a locally-connected ("local filtering") layer followed by
// max-pooling, with N and K interleaved and C blocked.
//
// Layouts visible from the indexing below:
//   INPUTS   [C, H, W, N_BLOCK]  (per n_block)
//   FILTERS  [k_block, output_h, output_w, c, y, x, K_BLOCK]  (per-position weights)
//   OUTPUTS/ARGMAXS  [K, pooled_H, pooled_W, N_BLOCK]  (per n_block)
// SCRATCH holds one [output_H, output_W, K_BLOCK, N_BLOCK] convolution buffer
// per OpenMP thread.
//
// Returns ARGMAXS. NOTE(review): the original declared an int* return type but
// had no return statement (undefined behavior for callers using the result);
// returning the argmax buffer matches the output-parameter convention here —
// confirm against the call sites.
int *local_filtering_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
    // Compile-time specialization must match the runtime arguments.
    assert(C == C_const);
    assert(H == H_const);
    assert(W == W_const);
    assert(K == K_const);
    assert(stride == stride_const);
    assert(padding == padding_const);
    assert(pooling_radius == pooling_radius_const);
    assert(pooling_stride == pooling_stride_const);
    assert(X == X_const);
    assert(Y == Y_const);
    assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
    assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
    assert(pooled_H_const == ceil((output_H_const - pooling_radius_const + 1.f)/pooling_stride_const));
    assert(pooled_W_const == ceil((output_W_const - pooling_radius_const + 1.f)/pooling_stride_const));

    #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
    in(INPUTS:length(0) REUSE) \
    in(FILTERS:length(0) REUSE) \
    in(OUTPUTS:length(0) REUSE) \
    in(ARGMAXS:length(0) REUSE) \
    in(SCRATCH:length(0) REUSE)
    {
        // (Unused locals i, j, hw, ij, nkhw, XWN, HYWN from the original removed.)
        int n_block, n, k_block, k, h, w, c, c_block, y, x;
        int nk;
        #pragma omp parallel for \
        schedule(dynamic) \
        default(none) \
        private(nk, n_block, n, k, k_block, h, w, c, c_block, y, x) \
        shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH)
        // ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
        for (nk = 0; nk < N/N_BLOCK*K_const/K_BLOCK; nk++){
            n_block = nk / (K_const/K_BLOCK);
            n = n_block*N_BLOCK;
            k_block = md(nk, K_const/K_BLOCK);
            k = k_block*K_BLOCK;
            // Zero this thread's convolution accumulator.
            SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*N_BLOCK*K_BLOCK : output_H_const*output_W_const*N_BLOCK*K_BLOCK] = 0.f;
            for (c_block = 0; c_block < C_const/C_BLOCK; c_block++){
                c = c_block*C_BLOCK;
                for (h = 0; h < output_H_const; h++){
                    for (w = 0; w < output_W_const; w++){
                        float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
                        for (y = 0; y < Y_const; ++y){
                            for (x = 0; x < X_const; ++x){
                                // Accumulate only where the receptive field overlaps the input.
                                if ((h + y - padding_const >= 0) && (h + y - padding_const < H_const) && (w + x - padding_const >= 0) && (w + x - padding_const < W_const)){
                                    // Local filtering: weights are indexed by output position (h, w).
                                    float *restrict filters_pointer = FILTERS + ti7(k_block, h, w, c, y, x, 0, output_H_const, output_W_const, C_const, Y_const, X_const, K_BLOCK);
                                    float *restrict inputs_pointer = INPUTS + ti5(n_block, c, h + y - padding_const, w + x - padding_const, 0, C_const, H_const, W_const, N_BLOCK);
                                    for (int cc = 0; cc < C_BLOCK; cc++){
                                        for (int kk = 0; kk < K_BLOCK; kk++){
                                            convolutions[kk*N_BLOCK : N_BLOCK] += inputs_pointer[0 : N_BLOCK] * (*filters_pointer);
                                            filters_pointer++;
                                        } //kk
                                        // Advance to the same (y, x) position of the next channel.
                                        filters_pointer += Y_const*X_const*K_BLOCK - K_BLOCK;
                                        inputs_pointer += H_const*W_const*N_BLOCK;
                                    } // cc
                                } // if
                            } // x
                        } // y
                    } // w
                } // h
            } // c_block
            // ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
            for (h = 0; h < pooled_H_const; h++){
                for (w = 0; w < pooled_W_const; w++){
                    int h_output = h*pooling_stride_const;
                    int w_output = w*pooling_stride_const;
                    // Clip the pooling window at the lower/right edges.
                    int window_width = pooling_radius_const - mx(w_output + pooling_radius_const - output_W_const, 0);
                    int window_height = pooling_radius_const - mx(h_output + pooling_radius_const - output_H_const, 0);
                    for (int kk = 0; kk < K_BLOCK; kk++){
                        float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
                        int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
                        float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
                        pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
                        int outputs_index = h_output*output_W_const + w_output;
                        for (y = 0; y < window_height; y++){
                            for (x = 0; x < window_width; x++){
                                // Elementwise (per-sample) max with argmax tracking.
                                if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
                                    pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
                                    argmaxs_pointer[0 : N_BLOCK] = outputs_index;
                                }
                                outputs_index++;
                                outputs_pointer += K_BLOCK*N_BLOCK;
                            }
                            outputs_index += output_W_const - window_width;
                            outputs_pointer += (output_W_const - window_width)*K_BLOCK*N_BLOCK;
                        }
                    }
                }
            }
        } //nk
    } // pragma_offload
    return ARGMAXS;
}
// Gather the INPUTS elements selected by ARGMAXS into OUTPUTS.
// Indexing below implies OUTPUTS/ARGMAXS are laid out [C, H, W, N]; each
// ARGMAXS entry is an offset within a channel plane, shifted by c*H*W*N.
//
// Bug fix: the original parallel loop ran over nc < N*C but never used
// n = nc / C — every (c, h, w, 0:N) assignment was therefore performed N
// times by different threads (same values, but formally a data race and N x
// redundant work). The loop now iterates over channels only; results are
// unchanged.
void get_argmaxs(int N, int C, int H, int W, float *restrict INPUTS, float *restrict OUTPUTS, int *restrict ARGMAXS){
    #pragma offload target(mic:MIC_DEV) \
    in(INPUTS:length(0) REUSE) \
    in(OUTPUTS:length(0) REUSE) \
    in(ARGMAXS:length(0) REUSE)
    {
        #pragma omp parallel for \
        schedule(dynamic) \
        default(none) \
        shared(N, INPUTS, OUTPUTS, ARGMAXS, C, H, W)
        for (int c = 0; c < C; c++){
            for (int h = 0; h < H; h++){
                for (int w = 0; w < W; w++){
                    // Array-notation gather across the N minibatch slots.
                    OUTPUTS[ti(c, h, w, 0, H, W, N) : N] = INPUTS[ARGMAXS[ti(c, h, w, 0, H, W, N): N] + c*H*W*N];
                } // w
            } // h
        } // c
    } // pragma offload
}
// void permute_dimensions(int D1, int D2, int D3, int D4, int perm1, int perm2, int perm3, int perm4, float *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int i1, i2, i3, i4;
// SCRATCH[0 : D1*D2*D3*D4] = TENSOR[0 : D1*D2*D3*D4];
// int dimensions[4] = {D1, D2, D3, D4};
// int P1 = dimensions[perm1];
// int P2 = dimensions[perm2];
// int P3 = dimensions[perm3];
// int P4 = dimensions[perm4];
// // printf("(%d, %d, %d, %d)\n (%d, %d, %d, %d)\n", D1, D2, D3, D4, P1, P2, P3, P4);
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(i1, i2, i3, i4) \
// shared(TENSOR, SCRATCH, D1, D2, D3, D4, P1, P2, P3, P4, perm1, perm2, perm3, perm4)
// for (int i1i2 = 0; i1i2 < D1*D2; i1i2++){
// i1 = i1i2 / D2;
// i2 = md(i1i2, D2);
// int indices[4];
// for (i3 = 0; i3 < D3; i3++){
// for (i4 = 0; i4 < D4; i4++){
// indices[0] = i1;
// indices[1] = i2;
// indices[2] = i3;
// indices[3] = i4;
// int p1 = indices[perm1];
// int p2 = indices[perm2];
// int p3 = indices[perm3];
// int p4 = indices[perm4];
// TENSOR[ti(p1, p2, p3, p4, P2, P3, P4)] = SCRATCH[ti(i1, i2, i3, i4, D2, D3, D4)];
// }
// }
// }
// } // pragma offload
// }
// void permute_dimensions_int(int D1, int D2, int D3, int D4, int perm1, int perm2, int perm3, int perm4, int *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int i1, i2, i3, i4;
// SCRATCH[0 : D1*D2*D3*D4] = (float) TENSOR[0 : D1*D2*D3*D4];
// int dimensions[4] = {D1, D2, D3, D4};
// int P1 = dimensions[perm1];
// int P2 = dimensions[perm2];
// int P3 = dimensions[perm3];
// int P4 = dimensions[perm4];
// // printf("(%d, %d, %d, %d)\n (%d, %d, %d, %d)\n", D1, D2, D3, D4, P1, P2, P3, P4);
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(i1, i2, i3, i4) \
// shared(TENSOR, SCRATCH, D1, D2, D3, D4, P1, P2, P3, P4, perm1, perm2, perm3, perm4)
// for (int i1i2 = 0; i1i2 < D1*D2; i1i2++){
// i1 = i1i2 / D2;
// i2 = md(i1i2, D2);
// int indices[4];
// for (i3 = 0; i3 < D3; i3++){
// for (i4 = 0; i4 < D4; i4++){
// indices[0] = i1;
// indices[1] = i2;
// indices[2] = i3;
// indices[3] = i4;
// int p1 = indices[perm1];
// int p2 = indices[perm2];
// int p3 = indices[perm3];
// int p4 = indices[perm4];
// TENSOR[ti(p1, p2, p3, p4, P2, P3, P4)] = (int) SCRATCH[ti(i1, i2, i3, i4, D2, D3, D4)];
// }
// }
// }
// } // pragma offload
// }
// Permute the axes of a 4-D tensor: for every input element INPUT[i1,i2,i3,i4]
// (extents D1 x D2 x D3 x D4), write it to the OUTPUT position where output
// axis j carries input axis perm_j. INPUT is read-only; OUTPUT receives the
// permuted copy.
void permute_dimensions(int D1, int D2, int D3, int D4, int perm1, int perm2, int perm3, int perm4, float *restrict INPUT, float *restrict OUTPUT){
    #pragma offload target(mic:MIC_DEV) \
    in(OUTPUT:length(0) REUSE) \
    in(INPUT:length(0) REUSE)
    {
        // Output extents: output axis j has the extent of input axis perm_j.
        int dims[4] = {D1, D2, D3, D4};
        int P1 = dims[perm1]; // unused in the index computation; kept for symmetry
        int P2 = dims[perm2];
        int P3 = dims[perm3];
        int P4 = dims[perm4];
        // inverse_perm[a] = which output axis input axis a lands on.
        __declspec(aligned(64)) int inverse_perm[4];
        inverse_perm[perm1] = 0;
        inverse_perm[perm2] = 1;
        inverse_perm[perm3] = 2;
        inverse_perm[perm4] = 3;
        // Parallelize over the two outermost input axes; each iteration walks
        // one contiguous D3*D4 slab of INPUT and scatters it into OUTPUT.
        #pragma omp parallel for \
        schedule(static,8) \
        default(none) \
        shared(OUTPUT, INPUT, D1, D2, D3, D4, P1, P2, P3, P4, perm1, perm2, perm3, perm4, inverse_perm)
        for (int i1i2 = 0; i1i2 < D1*D2; i1i2++){
            int i1 = i1i2 / D2;
            int i2 = md(i1i2, D2);
            float * slab = INPUT + (i1*D2 + i2)*D3*D4;
            // p[] holds the output coordinate for the current input element.
            __declspec(aligned(64)) int p[4];
            p[inverse_perm[0]] = i1;
            p[inverse_perm[1]] = i2;
            for (int i3 = 0; i3 < D3; i3++){
                p[inverse_perm[2]] = i3;
                float * row = slab + i3*D4;
                for (int i4 = 0; i4 < D4; i4++){
                    p[inverse_perm[3]] = i4;
                    OUTPUT[ti(p[0], p[1], p[2], p[3], P2, P3, P4)] = row[i4];
                }
            }
        }
    } // pragma offload
}
// permute_dimensions_int: in-place permutation of the axes of a 4-D tensor of
// 32-bit words (logical shape [D1, D2, D3, D4]) held in an int buffer.
// perm1..perm4 select which input axis becomes output axis 1..4; the output
// shape is [P1, P2, P3, P4] with Pi = dimensions[permi].  All data movement is
// a raw bit copy (the int buffer is reinterpreted as float) -- values are never
// converted between int and float.  SCRATCH is a staging buffer of at least
// D1*D2*D3*D4 floats.  Both buffers must already be resident on the MIC device
// (offload clauses use length(0) REUSE).
// NOTE(review): if the offload region falls back to host execution, __MIC__ is
// undefined and the staging loop below has an empty body, so words
// [0, D_aligned) would never reach SCRATCH -- this code assumes it always runs
// on the coprocessor; verify.
void permute_dimensions_int(int D1, int D2, int D3, int D4, int perm1, int perm2, int perm3, int perm4, int *restrict TENSOR, float *restrict SCRATCH){
#pragma offload target(mic:MIC_DEV) \
in(TENSOR:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int i1, i2, i3, i4;
int D = D1*D2*D3*D4;
// copy D elements from TENSOR to SCRATCH
// Stage 1: bulk bit-copy TENSOR -> SCRATCH, 64 floats (4 x 512-bit vectors)
// per iteration, with software prefetch and non-temporal stores.
int num_cache_lines_64 = D/64;
int D_aligned = num_cache_lines_64*64;
int D_remaining = D - D_aligned;
float * restrict TENSOR_FLOAT = (float *)(TENSOR);
#pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, TENSOR_FLOAT, SCRATCH)
for(int d = 0; d < num_cache_lines_64; d++)
{
float *restrict tensor_pointer = (float *)(TENSOR_FLOAT) + d*64;
float *restrict scratch_pointer = SCRATCH + d*64;
#if defined __MIC__
__m512 tens_1 = _mm512_extload_ps(tensor_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
_mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
_mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
_mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
_mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
#endif
}
//copy remaining unaligned elements
SCRATCH[D_aligned : D_remaining] = TENSOR_FLOAT[D_aligned : D_remaining];
// Stage 2: scatter SCRATCH (old layout) into TENSOR at permuted indices.
int dimensions[4] = {D1, D2, D3, D4};
int P1 = dimensions[perm1];
int P2 = dimensions[perm2];
int P3 = dimensions[perm3];
int P4 = dimensions[perm4];
// inverse_perm maps an input axis to its position in the output index tuple.
__declspec(aligned(64)) int inverse_perm[4];
inverse_perm[perm1] = 0;
inverse_perm[perm2] = 1;
inverse_perm[perm3] = 2;
inverse_perm[perm4] = 3;
//printf("(%d, %d, %d, %d) (%d, %d, %d, %d) \n", D1, D2, D3, D4, P1, P2, P3, P4);
#pragma omp parallel for \
schedule(static,8) \
default(none) \
private(i1, i2, i3, i4) \
shared(TENSOR_FLOAT, SCRATCH, D1, D2, D3, D4, P1, P2, P3, P4, perm1, perm2, perm3, perm4, inverse_perm)
for (int i1i2 = 0; i1i2 < D1*D2; i1i2++){
// The (i1, i2) pair is linearized so the parallel loop has D1*D2 iterations.
i1 = i1i2 / D2;
i2 = md(i1i2, D2);
float * SCRATCH_base = SCRATCH + i1*D2*D3*D4 + i2*D3*D4;
// p[] is the output index tuple corresponding to source element (i1,i2,i3,i4).
__declspec(aligned(64)) int p[4];
p[inverse_perm[0]] = i1;
p[inverse_perm[1]] = i2;
for (i3 = 0; i3 < D3; i3++){
p[inverse_perm[2]] = i3;
float * SCRATCH_ptr = SCRATCH_base + i3*D4;
for (i4 = 0; i4 < D4; i4++){
p[inverse_perm[3]] = i4;
// ti() and md() are index/modulo helpers defined elsewhere in this file.
TENSOR_FLOAT[ti(p[0], p[1], p[2], p[3], P2, P3, P4)] = SCRATCH_ptr[i4];
}
}
}
} // pragma offload
}
// transpose_replace: OUTPUT[c*N + n] = INPUT[n*C + c], i.e. transpose INPUT,
// an N-by-C row-major float matrix, into OUTPUT as a C-by-N row-major matrix.
// The main loop gathers 64 consecutive n values per channel (strided loads of
// stride C); a second loop handles the n tail in [N_aligned, N).  Runs inside
// a MIC offload region; both buffers must already be device-resident.
void transpose_replace(int N, int C, float *restrict INPUT, float *restrict OUTPUT){
#pragma offload target(mic:MIC_DEV) \
in(OUTPUT:length(0) REUSE) \
in(INPUT:length(0) REUSE)
{
int n, c;
// int D = N*C;
// int num_cache_lines_64 = D/64;
// int D_aligned = num_cache_lines_64*64;
// int D_remaining = D - D_aligned;
// #pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, OUTPUT, INPUT)
// for(int d = 0; d < num_cache_lines_64; d++)
// {
// float *restrict tensor_pointer = (float *)(OUTPUT) + d*64;
// float *restrict scratch_pointer = INPUT + d*64;
// #if defined __MIC__
// __m512 tens_1 = _mm512_extload_ps(tensor_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// _mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
// _mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
// #endif
// }
// //copy remaining unaligned elements
// INPUT[D_aligned : D_remaining] = OUTPUT[D_aligned : D_remaining];
int N_aligned = (N/64)*64;
// Main loop over (channel, 64-wide n chunk) pairs.  The inner `int c`/`int n`
// shadow the outer n, c listed in private() -- harmless.
#pragma omp parallel for schedule(static,16) default(none) private(n, c) shared(N, OUTPUT, INPUT, C)
for (int nc = 0; nc < C * (N/64); nc++){
int c = nc % C;
int n = (nc / C)*64;
OUTPUT[c*N + n : 64] = INPUT[c + n*C : 64 : C];
}
// Tail: remaining N - N_aligned rows for every channel.
#pragma omp parallel for schedule(static,16) default(none) private(n, c) shared(N, N_aligned, OUTPUT, INPUT, C)
for (int c = 0; c < C; c++){
OUTPUT[c*N + N_aligned : (N-N_aligned)] = INPUT[c + N_aligned*C: (N-N_aligned) : C];
}
} // pragma offload
}
// transpose_replace_int: transpose TENSOR, an int buffer viewed as an N-by-C
// row-major matrix of 32-bit words, in place into C-by-N order, staging through
// SCRATCH (>= N*C floats).  All copies are bitwise (buffer reinterpreted as
// float); int values are never converted to float.
// NOTE(review): off-device (__MIC__ undefined) the staging loop body is empty,
// so only the tail past D_aligned would be copied -- assumes coprocessor
// execution; verify.
void transpose_replace_int(int N, int C, int *restrict TENSOR, float *restrict SCRATCH){
#pragma offload target(mic:MIC_DEV) \
in(TENSOR:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n, c;
int D = N*C;
// Stage 1: bulk bit-copy TENSOR -> SCRATCH, 64 floats per iteration.
int num_cache_lines_64 = D/64;
int D_aligned = num_cache_lines_64*64;
int D_remaining = D - D_aligned;
float * restrict TENSOR_FLOAT = (float *)(TENSOR);
#pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, TENSOR_FLOAT, SCRATCH)
for(int d = 0; d < num_cache_lines_64; d++)
{
float *restrict tensor_pointer = (float *)(TENSOR_FLOAT) + d*64;
float *restrict scratch_pointer = SCRATCH + d*64;
#if defined __MIC__
__m512 tens_1 = _mm512_extload_ps(tensor_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
_mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
_mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
_mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
_mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
#endif
}
//copy remaining unaligned elements
SCRATCH[D_aligned : D_remaining] = TENSOR_FLOAT[D_aligned : D_remaining];
// Stage 2: transposed write-back, 64 n values per iteration, then the n tail.
int N_aligned = (N/64)*64;
#pragma omp parallel for schedule(static,16) default(none) private(n, c) shared(N, TENSOR_FLOAT, SCRATCH, C)
for (int nc = 0; nc < C * (N/64); nc++){
int c = nc % C;
int n = nc / C;
TENSOR_FLOAT[c*N + n*64 : 64] = SCRATCH[c + n*64*C : 64 : C];
}
#pragma omp parallel for schedule(static,16) default(none) private(n, c) shared(N, N_aligned, TENSOR_FLOAT, SCRATCH, C)
for (int c = 0; c < C; c++){
TENSOR_FLOAT[c*N + N_aligned : (N-N_aligned)] = SCRATCH[c + N_aligned*C: (N-N_aligned) : C];
}
} // pragma offload
}
// void transpose_replace(int N, int C, float *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// // mkl_somatcopy('R', 'T', N, C, 1.0, TENSOR, C, SCRATCH, N);
// int n, c;
// SCRATCH[0 : N*C] = TENSOR[0 : N*C];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(n, c) \
// shared(N, TENSOR, SCRATCH, C)
// for (int c = 0; c < C; c++){
// TENSOR[c*N : N] = SCRATCH[c : N : C];
// }
// } // pragma offload
// }
// void transpose_replace_int(int N, int C, int *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// // mkl_somatcopy('R', 'T', N, C, 1.0, TENSOR, C, SCRATCH, N);
// int n, c;
// SCRATCH[0 : N*C] = (float) TENSOR[0 : N*C];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(n, c) \
// shared(N, TENSOR, SCRATCH, C)
// for (int c = 0; c < C; c++){
// TENSOR[c*N : N] = (int) SCRATCH[c : N : C];
// }
// } // pragma offload
// }
// [C/C_BLOCK, Y/Y_BLOCK, K, Y_BLOCK, X, C_BLOCK]
// void interleave_filters_for_gradient(const int K, const int C, const int Y, const int X, const int C_BLOCKSIZE, const int Y_BLOCKSIZE, float *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int c_block, cc, y_block, yy, k, c, y, x;
// SCRATCH[0 : K*C*Y*X] = TENSOR[0 : K*C*Y*X];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(c_block, cc, y_block, yy, k, c, y, x) \
// shared(K, C, Y, X, TENSOR, SCRATCH, C_BLOCKSIZE, Y_BLOCKSIZE)
// for (int nc = 0; nc < N*C; nc++){
// n = nc / C;
// c = md(nc, C);
// c_block = c/BLOCKSIZE;
// cc = md(c, BLOCKSIZE);
// for (h = 0; h < H; h++){
// for (w = 0; w < W; w++){
// TENSOR[ti5(n, c_block, h, w, cc, C/BLOCKSIZE, H, W, BLOCKSIZE)] = SCRATCH[ti(n, c, h, w, C, H, W)];
// }
// }
// } // nc
// } // pragma offload
// }
// interleave_for_gradient: repack INPUT, laid out [N, C, H, W] row-major, into
// OUTPUT with the channel dimension split into blocks.  Judging from the
// pointer strides below (OUTPUT cursor advances by BLOCKSIZE per w step, INPUT
// cursor by 1), the output layout is [N, C/BLOCKSIZE, H, W, BLOCKSIZE] with the
// intra-block channel index innermost.  ti()/ti5() are row-major index helpers
// and md() a modulo helper, all defined elsewhere in this file.
// NOTE(review): presumably requires C % BLOCKSIZE == 0; not asserted here --
// verify at call sites.
void interleave_for_gradient(const int N, const int C, const int H, const int W, const int BLOCKSIZE, float *restrict INPUT, float *restrict OUTPUT){
#pragma offload target(mic:MIC_DEV) \
in(OUTPUT:length(0) REUSE) \
in(INPUT:length(0) REUSE)
{
int c_block, cc, n, c, h, w;
int C_BLOCKSIZE = C/BLOCKSIZE;
// int D = N*C*H*W;
// int num_cache_lines_64 = D/64;
// int D_aligned = num_cache_lines_64*64;
// int D_remaining = D - D_aligned;
// #pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, OUTPUT, INPUT)
// for(int d = 0; d < num_cache_lines_64; d++)
// {
// float *restrict tensor_pointer = (float *)(OUTPUT) + d*64;
// float *restrict scratch_pointer = INPUT + d*64;
// #if defined __MIC__
// __m512 tens_1 = _mm512_extload_ps(tensor_pointer , _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// _mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
// _mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
// #endif
// }
// //copy remaining unaligned elements
// INPUT[D_aligned : D_remaining] = OUTPUT[D_aligned : D_remaining];
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(c_block, n, c, cc, h, w) \
shared(N, H, W, C, C_BLOCKSIZE, OUTPUT, INPUT, BLOCKSIZE)
for (int nc = 0; nc < N*C; nc++){
// One iteration handles the full H*W plane of a single (sample, channel) pair.
n = nc / C;
c = md(nc, C);
c_block = c/BLOCKSIZE;
cc = md(c, BLOCKSIZE);
// OUTPUT[ti5(n, c_block, 0, 0, cc, C_BLOCKSIZE, H, W, BLOCKSIZE) : H*W : BLOCKSIZE] = INPUT[ti(n, c, 0, 0, C, H, W) : H*W];
float *restrict OUTPUT_pointer = OUTPUT + ti5(n, c_block, 0, 0, cc, C_BLOCKSIZE, H, W, BLOCKSIZE);
float *restrict INPUT_pointer = INPUT + ti(n, c, 0, 0, C, H, W);
for (h = 0; h < H; h++){
for (w = 0; w < W; w++){
// OUTPUT[ti5(n, c_block, h, w, cc, C_BLOCKSIZE, H, W, BLOCKSIZE)] = INPUT[ti(n, c, h, w, C, H, W)];
*OUTPUT_pointer = *INPUT_pointer;
OUTPUT_pointer += BLOCKSIZE;
INPUT_pointer++;
}
}
} // nc
} // pragma offload
}
// uninterleave_for_gradient: inverse of interleave_for_gradient -- gather
// OUTPUT, laid out [N, C, H, W] row-major, from INPUT stored in the blocked
// [N, C/BLOCKSIZE, H, W, BLOCKSIZE] layout (INPUT cursor advances by BLOCKSIZE
// per w step, OUTPUT cursor by 1).  ti()/ti5() are row-major index helpers and
// md() a modulo helper, all defined elsewhere in this file.
// NOTE(review): presumably requires C % BLOCKSIZE == 0; not asserted here --
// verify at call sites.
void uninterleave_for_gradient(const int N, const int C, const int H, const int W, const int BLOCKSIZE, float *restrict INPUT, float *restrict OUTPUT){
#pragma offload target(mic:MIC_DEV) \
in(OUTPUT:length(0) REUSE) \
in(INPUT:length(0) REUSE)
{
int c_block, cc, n, c, h, w;
int C_BLOCKSIZE = C/BLOCKSIZE;
// int D = N*C*H*W;
// int num_cache_lines_64 = D/64;
// int D_aligned = num_cache_lines_64*64;
// int D_remaining = D - D_aligned;
// #pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, OUTPUT, INPUT)
// for(int d = 0; d < num_cache_lines_64; d++)
// {
// float *restrict tensor_pointer = (float *)(OUTPUT) + d*64;
// float *restrict scratch_pointer = INPUT + d*64;
// #if defined __MIC__
// __m512 tens_1 = _mm512_extload_ps(tensor_pointer , _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// _mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
// _mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
// #endif
// }
// //copy remaining unaligned elements
// INPUT[D_aligned : D_remaining] = OUTPUT[D_aligned : D_remaining];
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(c_block, n, c, cc, h, w) \
shared(N, H, W, C, C_BLOCKSIZE, OUTPUT, INPUT, BLOCKSIZE)
for (int nc = 0; nc < N*C; nc++){
// One iteration handles the full H*W plane of a single (sample, channel) pair.
n = nc / C;
c = md(nc, C);
c_block = c/BLOCKSIZE;
cc = md(c, BLOCKSIZE);
// for (h = 0; h < H; h++){
// for (w = 0; w < W; w++){
// OUTPUT[ti(n, c, h, w, C, H, W)] = INPUT[ti5(n, c_block, h, w, cc, C_BLOCKSIZE, H, W, BLOCKSIZE)];
// }
// }
float *restrict OUTPUT_pointer = OUTPUT + ti(n, c, 0, 0, C, H, W);
float *restrict INPUT_pointer = INPUT + ti5(n, c_block, 0, 0, cc, C_BLOCKSIZE, H, W, BLOCKSIZE);
for (h = 0; h < H; h++){
for (w = 0; w < W; w++){
// OUTPUT[ti(n, c, h, w, C, H, W)] = INPUT[ti5(n, c_block, h, w, cc, C_BLOCKSIZE, H, W, BLOCKSIZE)];
*OUTPUT_pointer = *INPUT_pointer;
INPUT_pointer += BLOCKSIZE;
OUTPUT_pointer++;
}
}
} // nc
} // pragma offload
}
// void interleave_for_gradient(const int N, const int C, const int H, const int W, const int BLOCKSIZE, float *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int c_block, cc, n, c, h, w;
// int C_BLOCKSIZE = C/BLOCKSIZE;
// SCRATCH[0 : N*C*H*W] = TENSOR[0 : N*C*H*W];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(c_block, n, c, cc, h, w) \
// shared(N, H, W, C, C_BLOCKSIZE, TENSOR, SCRATCH, BLOCKSIZE)
// for (int nc = 0; nc < N*C; nc++){
// n = nc / C;
// c = md(nc, C);
// c_block = c/BLOCKSIZE;
// cc = md(c, BLOCKSIZE);
// for (h = 0; h < H; h++){
// for (w = 0; w < W; w++){
// TENSOR[ti5(n, c_block, h, w, cc, C_BLOCKSIZE, H, W, BLOCKSIZE)] = SCRATCH[ti(n, c, h, w, C, H, W)];
// }
// }
// } // nc
// } // pragma offload
// }
// void uninterleave_for_gradient(const int N, const int C, const int H, const int W, const int BLOCKSIZE, float *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int c_block, cc, n, c, h, w;
// int C_BLOCKSIZE = C/BLOCKSIZE;
// SCRATCH[0 : N*C*H*W] = TENSOR[0 : N*C*H*W];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(c_block, n, c, cc, h, w) \
// shared(N, H, W, C, C_BLOCKSIZE, TENSOR, SCRATCH, BLOCKSIZE)
// for (int nc = 0; nc < N*C; nc++){
// n = nc / C;
// c = md(nc, C);
// c_block = c/BLOCKSIZE;
// cc = md(c, BLOCKSIZE);
// for (h = 0; h < H; h++){
// for (w = 0; w < W; w++){
// TENSOR[ti(n, c, h, w, C, H, W)] = SCRATCH[ti5(n, c_block, h, w, cc, C_BLOCKSIZE, H, W, BLOCKSIZE)];
// }
// }
// } // nc
// } // pragma offload
// }
// interleave_block: repack INPUT, a C-by-N row-major float matrix, into OUTPUT
// with blocked layout [N/BLOCKSIZE][C][BLOCKSIZE]:
//   OUTPUT[(n_block*C + c)*BLOCKSIZE + i] = INPUT[c*N + n_block*BLOCKSIZE + i].
// Requires N % BLOCKSIZE == 0 (asserted).  ti3() is a row-major 3-D index
// helper defined elsewhere in this file.  Runs inside a MIC offload region;
// both buffers must already be device-resident.
void interleave_block(const int N, const int C, const int BLOCKSIZE, float *restrict INPUT, float *restrict OUTPUT){
#pragma offload target(mic:MIC_DEV) \
in(OUTPUT:length(0) REUSE) \
in(INPUT:length(0) REUSE)
{
assert((N % BLOCKSIZE) == 0);
// int D = N*C;
// int num_cache_lines_64 = D/64;
// int D_aligned = num_cache_lines_64*64;
// int D_remaining = D - D_aligned;
// #pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, OUTPUT, INPUT)
// for(int d = 0; d < num_cache_lines_64; d++)
// {
// float *restrict tensor_pointer = (float *)(OUTPUT) + d*64;
// float *restrict scratch_pointer = INPUT + d*64;
// #if defined __MIC__
// __m512 tens_1 = _mm512_extload_ps(tensor_pointer , _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// _mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
// _mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
// _mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
// _mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
// _mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
// #endif
// }
// //copy remaining unaligned elements
// INPUT[D_aligned : D_remaining] = OUTPUT[D_aligned : D_remaining];
#pragma omp parallel for \
schedule(static,16) \
default(none) \
shared(N, OUTPUT, INPUT, C, BLOCKSIZE)
for(int c = 0; c < C; c++){
float *restrict INPUT_pointer = INPUT + c*N;
#pragma noprefetch
#pragma unroll (4)
for(int n_block = 0; n_block < N/BLOCKSIZE; n_block++)
{
float *restrict OUTPUT_pointer = OUTPUT + ti3(n_block, c, 0, C, BLOCKSIZE);
// NOTE(review): BLOCKSIZE is a runtime parameter, not a macro, so the
// preprocessor treats the undefined identifier as 0 and this condition is
// always false -- the intrinsic branch is never compiled and the array-
// notation branch below is always used.  A runtime `if (BLOCKSIZE == 16)`
// inside `#if defined __MIC__` would be needed to enable it; verify
// 64-byte alignment of both buffers before doing so.
#if BLOCKSIZE==16 && defined __MIC__
__m512 INPUT_1 = _mm512_extload_ps(INPUT_pointer + n_block*BLOCKSIZE, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(INPUT_pointer + n_block*BLOCKSIZE + N), _MM_HINT_T1);
_mm_prefetch((char *)(INPUT_pointer + (n_block+2)*BLOCKSIZE), _MM_HINT_T0);
_mm512_storenrngo_ps((float *)(OUTPUT_pointer), INPUT_1);
#else
OUTPUT_pointer[0 : BLOCKSIZE] = INPUT_pointer[n_block*BLOCKSIZE : BLOCKSIZE];
#endif
}
}
} // pragma offload
}
/*
 * interleave_block_int: reorder TENSOR, an int buffer viewed as a C-by-N
 * row-major matrix of 32-bit words, into blocked layout
 * [N/BLOCKSIZE][C][BLOCKSIZE]:
 *   out[(n_block*C + c)*BLOCKSIZE + i] = in[c*N + n_block*BLOCKSIZE + i].
 * The reorder is done in place through SCRATCH (>= N*C floats).  Every copy
 * moves raw 32-bit words with the buffer reinterpreted as float; int values
 * are never converted to float (consistent with permute_dimensions_int and
 * transpose_replace_int).  Requires N % BLOCKSIZE == 0 (asserted).  Runs
 * inside a MIC offload region; both buffers must already be device-resident.
 * ti3() is a row-major 3-D index helper defined elsewhere in this file.
 */
void interleave_block_int(const int N, const int C, const int BLOCKSIZE, int *restrict TENSOR, float *restrict SCRATCH){
#pragma offload target(mic:MIC_DEV) \
in(TENSOR:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, c;
assert((N % BLOCKSIZE) == 0);
int D = N*C;
int num_cache_lines_64 = D/64;
int D_aligned = num_cache_lines_64*64;
int D_remaining = D - D_aligned;
// View the int payload as float bits; all movement below is bitwise.
float * restrict TENSOR_FLOAT = (float *)(TENSOR);
// Stage 1: bulk bit-copy TENSOR -> SCRATCH, 64 floats per iteration.
#pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, TENSOR_FLOAT, SCRATCH)
for(int d = 0; d < num_cache_lines_64; d++)
{
float *restrict tensor_pointer = TENSOR_FLOAT + d*64;
float *restrict scratch_pointer = SCRATCH + d*64;
#if defined __MIC__
__m512 tens_1 = _mm512_extload_ps(tensor_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
_mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
_mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
_mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
_mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
#else
// Host fallback: previously this loop body was empty off-device, so the
// aligned portion was never staged when the offload ran on the host.
scratch_pointer[0 : 64] = tensor_pointer[0 : 64];
#endif
}
// Copy the remaining tail bitwise.  (Bug fix: this used TENSOR[...], which
// VALUE-converted the ints to float while the vectorized path and the blocked
// write-back below move raw bits -- leaving the tail of TENSOR holding float
// bit patterns of its original int values whenever N*C % 64 != 0.)
SCRATCH[D_aligned : D_remaining] = TENSOR_FLOAT[D_aligned : D_remaining];
// Stage 2: write SCRATCH back into TENSOR in blocked order.
#pragma omp parallel for \
schedule(static,16) \
default(none) \
shared(N, TENSOR_FLOAT, SCRATCH, C, BLOCKSIZE)
for(int c = 0; c < C; c++){
float *restrict scratch_pointer = SCRATCH + c*N;
#pragma noprefetch
#pragma unroll (4)
for(int n_block = 0; n_block < N/BLOCKSIZE; n_block++)
{
float *restrict tensor_pointer = TENSOR_FLOAT + ti3(n_block, c, 0, C, BLOCKSIZE);
// NOTE(review): BLOCKSIZE is a runtime parameter, so the preprocessor treats
// the undefined identifier as 0 and this intrinsic branch is never compiled;
// the array-notation branch below is always used.
#if BLOCKSIZE==16 && defined __MIC__
__m512 scratch_1 = _mm512_extload_ps(scratch_pointer + n_block*BLOCKSIZE, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(scratch_pointer + n_block*BLOCKSIZE + N), _MM_HINT_T1);
_mm_prefetch((char *)(scratch_pointer + (n_block+2)*BLOCKSIZE), _MM_HINT_T0);
_mm512_storenrngo_ps((float *)(tensor_pointer), scratch_1);
#else
tensor_pointer[0 : BLOCKSIZE] = scratch_pointer[n_block*BLOCKSIZE : BLOCKSIZE];
#endif
}
}
} // pragma offload
}
// uninterleave_block: inverse of interleave_block -- gather OUTPUT, a C-by-N
// row-major float matrix, from INPUT stored in blocked
// [N/BLOCKSIZE][C][BLOCKSIZE] layout:
//   OUTPUT[c*N + n_block*BLOCKSIZE + i] = INPUT[(n_block*C + c)*BLOCKSIZE + i].
// Requires N % BLOCKSIZE == 0 (asserted).  ti3() is a row-major 3-D index
// helper defined elsewhere in this file.
void uninterleave_block(const int N, const int C, const int BLOCKSIZE, float *restrict INPUT, float *restrict OUTPUT){
#pragma offload target(mic:MIC_DEV) \
in(OUTPUT:length(0) REUSE) \
in(INPUT:length(0) REUSE)
{
int n_block, n, c;
assert((N % BLOCKSIZE) == 0);
// int D = N*C;
// int num_cache_lines_64 = D/64;
// int D_aligned = num_cache_lines_64*64;
// int D_remaining = D - D_aligned;
// #pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, OUTPUT, INPUT)
// for(int d = 0; d < num_cache_lines_64; d++)
// {
// float *restrict OUTPUT_pointer = (float *)(OUTPUT) + d*64;
// float *restrict INPUT_pointer = INPUT + d*64;
// #if defined __MIC__
// __m512 tens_1 = _mm512_extload_ps(OUTPUT_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_2 = _mm512_extload_ps(OUTPUT_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_3 = _mm512_extload_ps(OUTPUT_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 tens_4 = _mm512_extload_ps(OUTPUT_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// _mm_prefetch((char *)(OUTPUT_pointer + 64 ), _MM_HINT_T0);
// _mm_prefetch((char *)(OUTPUT_pointer + 64 + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(OUTPUT_pointer + 64 + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(OUTPUT_pointer + 64 + 48), _MM_HINT_T0);
// _mm_prefetch((char *)(OUTPUT_pointer + 256), _MM_HINT_T1);
// _mm_prefetch((char *)(OUTPUT_pointer + 256 + 16), _MM_HINT_T1);
// _mm_prefetch((char *)(OUTPUT_pointer + 256 + 32), _MM_HINT_T1);
// _mm_prefetch((char *)(OUTPUT_pointer + 256 + 48), _MM_HINT_T1);
// _mm512_storenrngo_ps((float *)(INPUT_pointer), tens_1);
// _mm512_storenrngo_ps((float *)(INPUT_pointer + 16), tens_2);
// _mm512_storenrngo_ps((float *)(INPUT_pointer + 32), tens_3);
// _mm512_storenrngo_ps((float *)(INPUT_pointer + 48), tens_4);
// #endif
// }
// //copy remaining unaligned elements
// INPUT[D_aligned : D_remaining] = OUTPUT[D_aligned : D_remaining];
#pragma omp parallel for \
schedule(static,16) \
default(none) \
shared(N, OUTPUT, INPUT, C, BLOCKSIZE)
for(int c = 0; c < C; c++){
float *restrict OUTPUT_pointer = OUTPUT + c*N;
#pragma noprefetch
#pragma unroll (4)
for(int n_block = 0; n_block < N/BLOCKSIZE; n_block++)
{
float *restrict INPUT_pointer = INPUT + ti3(n_block, c, 0, C, BLOCKSIZE);
// NOTE(review): BLOCKSIZE is a runtime parameter, not a macro, so the
// preprocessor treats the undefined identifier as 0 and this condition is
// always false -- the intrinsic branch is never compiled; the array-notation
// branch below is always used.
#if BLOCKSIZE==16 && defined __MIC__
__m512 INPUT_1 = _mm512_extload_ps(INPUT_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(INPUT_pointer + 6*C*BLOCKSIZE), _MM_HINT_T1);
_mm_prefetch((char *)(INPUT_pointer + 2*C*BLOCKSIZE), _MM_HINT_T0);
_mm512_storenrngo_ps((float *)(OUTPUT_pointer + n_block*BLOCKSIZE), INPUT_1);
#else
OUTPUT_pointer[n_block*BLOCKSIZE : BLOCKSIZE] = INPUT_pointer[0 : BLOCKSIZE];
#endif
}
}
// Dead experiment kept disabled: it references TENSOR/SCRATCH, which are not
// parameters of this function.
#if 0
SCRATCH[0 : N*C] = TENSOR[0 : N*C];
#pragma omp parallel for \
schedule(dynamic) \
default(none) \
private(n_block, n, c) \
shared(N, TENSOR, SCRATCH, C, BLOCKSIZE)
for (int nc = 0; nc < N/BLOCKSIZE*C; nc++){
n_block = nc / C;
n = n_block*BLOCKSIZE;
c = md(nc, C);
TENSOR[ti2(c, n, N) : BLOCKSIZE] = SCRATCH[ti3(n_block, c, 0, C, BLOCKSIZE) : BLOCKSIZE];
} // nc
#endif
} // pragma offload
}
/*
 * uninterleave_block_int: inverse of interleave_block_int -- gather TENSOR,
 * an int buffer, from blocked [N/BLOCKSIZE][C][BLOCKSIZE] order back into
 * C-by-N row-major order:
 *   out[c*N + n_block*BLOCKSIZE + i] = in[(n_block*C + c)*BLOCKSIZE + i].
 * Done in place through SCRATCH (>= N*C floats).  Every copy moves raw 32-bit
 * words with the buffer reinterpreted as float; int values are never converted
 * to float (consistent with permute_dimensions_int and transpose_replace_int).
 * Requires N % BLOCKSIZE == 0 (asserted).  Runs inside a MIC offload region;
 * both buffers must already be device-resident.  ti3() is a row-major 3-D
 * index helper defined elsewhere in this file.
 */
void uninterleave_block_int(const int N, const int C, const int BLOCKSIZE, int *restrict TENSOR, float *restrict SCRATCH){
#pragma offload target(mic:MIC_DEV) \
in(TENSOR:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int n_block, n, c;
assert((N % BLOCKSIZE) == 0);
int D = N*C;
int num_cache_lines_64 = D/64;
int D_aligned = num_cache_lines_64*64;
int D_remaining = D - D_aligned;
// View the int payload as float bits; all movement below is bitwise.
float * restrict TENSOR_FLOAT = (float *)(TENSOR);
// Stage 1: bulk bit-copy TENSOR -> SCRATCH, 64 floats per iteration.
#pragma omp parallel for schedule(static,16) default(none) shared(D, num_cache_lines_64, TENSOR_FLOAT, SCRATCH)
for(int d = 0; d < num_cache_lines_64; d++)
{
float *restrict tensor_pointer = TENSOR_FLOAT + d*64;
float *restrict scratch_pointer = SCRATCH + d*64;
#if defined __MIC__
__m512 tens_1 = _mm512_extload_ps(tensor_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_2 = _mm512_extload_ps(tensor_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_3 = _mm512_extload_ps(tensor_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
__m512 tens_4 = _mm512_extload_ps(tensor_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(tensor_pointer + 64 ), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 16), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 32), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 64 + 48), _MM_HINT_T0);
_mm_prefetch((char *)(tensor_pointer + 256), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 16), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 32), _MM_HINT_T1);
_mm_prefetch((char *)(tensor_pointer + 256 + 48), _MM_HINT_T1);
_mm512_storenrngo_ps((float *)(scratch_pointer), tens_1);
_mm512_storenrngo_ps((float *)(scratch_pointer + 16), tens_2);
_mm512_storenrngo_ps((float *)(scratch_pointer + 32), tens_3);
_mm512_storenrngo_ps((float *)(scratch_pointer + 48), tens_4);
#else
// Host fallback: previously this loop body was empty off-device, so the
// aligned portion was never staged when the offload ran on the host.
scratch_pointer[0 : 64] = tensor_pointer[0 : 64];
#endif
}
// Copy the remaining tail bitwise.  (Bug fix: this used TENSOR[...], which
// VALUE-converted the ints to float while the vectorized path and the
// write-back below move raw bits -- leaving the tail of TENSOR holding float
// bit patterns of its original int values whenever N*C % 64 != 0.)
SCRATCH[D_aligned : D_remaining] = TENSOR_FLOAT[D_aligned : D_remaining];
// Stage 2: write SCRATCH (blocked order) back into TENSOR row-major.
#pragma omp parallel for \
schedule(static,16) \
default(none) \
shared(N, TENSOR_FLOAT, SCRATCH, C, BLOCKSIZE)
for(int c = 0; c < C; c++){
float *restrict tensor_pointer = TENSOR_FLOAT + c*N;
#pragma noprefetch
#pragma unroll (4)
for(int n_block = 0; n_block < N/BLOCKSIZE; n_block++)
{
float *restrict scratch_pointer = SCRATCH + ti3(n_block, c, 0, C, BLOCKSIZE);
// NOTE(review): BLOCKSIZE is a runtime parameter, so the preprocessor treats
// the undefined identifier as 0 and this intrinsic branch is never compiled;
// the array-notation branch below is always used.
#if BLOCKSIZE==16 && defined __MIC__
__m512 scratch_1 = _mm512_extload_ps(scratch_pointer, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
_mm_prefetch((char *)(scratch_pointer + 6*C*BLOCKSIZE), _MM_HINT_T1);
_mm_prefetch((char *)(scratch_pointer + 2*C*BLOCKSIZE), _MM_HINT_T0);
_mm512_storenrngo_ps((float *)(tensor_pointer + n_block*BLOCKSIZE), scratch_1);
#else
tensor_pointer[n_block*BLOCKSIZE : BLOCKSIZE] = scratch_pointer[0 : BLOCKSIZE];
#endif
}
}
} // pragma offload
}
// void interleave_block(const int N, const int C, const int BLOCKSIZE, float *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, c;
// SCRATCH[0 : N*C] = TENSOR[0 : N*C];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(n_block, n, c) \
// shared(N, TENSOR, SCRATCH, C, BLOCKSIZE)
// for (int c = 0; c < C; c++){
// // float *restrict tensor_pointer = TENSOR + ti3(n_block, c, 0, C, BLOCKSIZE);
// // float *restrict scratch_pointer = SCRATCH + ti2(c, n, N);
// for (n_block = 0; n_block < N/BLOCKSIZE; n_block++){
// TENSOR[(n_block*C + c)*BLOCKSIZE : BLOCKSIZE] = SCRATCH[c*N + n_block*BLOCKSIZE : BLOCKSIZE];
// }
// } // nc
// } // pragma offload
// }
// void interleave_block_int(const int N, const int C, const int BLOCKSIZE, int *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, c;
// SCRATCH[0 : N*C] = (float) TENSOR[0 : N*C];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(n_block, n, c) \
// shared(N, TENSOR, SCRATCH, C, BLOCKSIZE)
// for (int nc = 0; nc < N/BLOCKSIZE*C; nc++){
// n_block = nc / C;
// n = n_block*BLOCKSIZE;
// c = md(nc, C);
// int *restrict tensor_pointer = TENSOR + ti3(n_block, c, 0, C, BLOCKSIZE);
// float *restrict scratch_pointer = SCRATCH + ti2(c, n, N);
// tensor_pointer[0 : BLOCKSIZE] = (int) scratch_pointer[0 : BLOCKSIZE];
// } // nc
// } // pragma offload
// }
// void uninterleave_block(const int N, const int C, const int BLOCKSIZE, float *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, c;
// SCRATCH[0 : N*C] = TENSOR[0 : N*C];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(n_block, n, c) \
// shared(N, TENSOR, SCRATCH, C, BLOCKSIZE)
// for (int nc = 0; nc < N/BLOCKSIZE*C; nc++){
// n_block = nc / C;
// n = n_block*BLOCKSIZE;
// c = md(nc, C);
// TENSOR[ti2(c, n, N) : BLOCKSIZE] = SCRATCH[ti3(n_block, c, 0, C, BLOCKSIZE) : BLOCKSIZE];
// } // nc
// } // pragma offload
// }
// void uninterleave_block_int(const int N, const int C, const int BLOCKSIZE, int *restrict TENSOR, float *restrict SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(TENSOR:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, c;
// SCRATCH[0 : N*C] = (float) TENSOR[0 : N*C];
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(n_block, n, c) \
// shared(N, TENSOR, SCRATCH, C, BLOCKSIZE)
// for (int nc = 0; nc < N/BLOCKSIZE*C; nc++){
// n_block = nc / C;
// n = n_block*BLOCKSIZE;
// c = md(nc, C);
// TENSOR[ti2(c, n, N) : BLOCKSIZE] = (int) SCRATCH[ti3(n_block, c, 0, C, BLOCKSIZE) : BLOCKSIZE];
// } // nc
// } // pragma offload
// }
// Convolution before changing data structure to [N, C, H, W, BLOCK]
// int *convolution_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(stride == stride_const);
// assert(padding == padding_const);
// assert(pooling_radius == pooling_radius_const);
// assert(pooling_stride == pooling_stride_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == (output_H_const - pooling_radius_const + 1)/pooling_stride_const);
// assert(pooled_W_const == (output_W_const - pooling_radius_const + 1)/pooling_stride_const);
// #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(OUTPUTS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, k, i, j, h, w, c, y, x;
// int nk, hw, ij, nkhw;
// // computation of constants
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// // SCRATCH[0:K_const*N*output_H_const*output_W_const] = 0.f;
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(nk, hw, ij, n_block, n, k, h, w, c, y, x, i, j) \
// shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// // ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// for (nk = 0; nk < N/BLOCK*K_const; nk++){
// n_block = nk / K_const;
// n = n_block*BLOCK;
// k = md(nk, K_const);
// SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*BLOCK : output_H_const*output_W_const*BLOCK] = 0.f;
// for (c = 0; c < C_const; c++){
// for (h = 0; h < output_H_const; h++){
// for (w = 0; w < output_W_const; w++){
// // float *restrict convolutions = SCRATCH + ti(k, h, w, n, output_H_const, output_W_const, N);
// float *restrict convolutions = SCRATCH + ti(omp_get_thread_num(), h, w, 0, output_H_const, output_W_const, BLOCK);
// float *restrict convolutions_next = SCRATCH + ti(omp_get_thread_num(), h, w+1, 0, output_H_const, output_W_const, BLOCK);
// __assume_aligned(convolutions, 64);
// _mm_prefetch((char *)(convolutions_next), _MM_HINT_ET0);
// _mm_prefetch((char *)(convolutions_next + 16), _MM_HINT_ET0);
// _mm_prefetch((char *)(convolutions_next + 32), _MM_HINT_ET0);
// _mm_prefetch((char *)(convolutions_next + 48), _MM_HINT_ET0);
// int kcyx_shift = (k*C_const + c)*Y_const*X_const - 1; // filters
// float *restrict filters_pointer = FILTERS + kcyx_shift;
// // if we're not on the boundary (i.e., not affected by padding)
// if (w - padding_const >= 0 &&
// h - padding_const >= 0 &&
// output_W_const - 1 - w >= padding_const &&
// output_H_const - 1 - h >= padding_const){
// #if 1 && defined __MIC__
// __m512 res_1 = _mm512_extload_ps(convolutions, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_2 = _mm512_extload_ps(convolutions + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_3 = _mm512_extload_ps(convolutions + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_4 = _mm512_extload_ps(convolutions + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// #endif
// float *restrict inputs_pointer = INPUTS + ti(c, h - padding_const, w - padding_const, n, H_const, W_const, N);
// for (y = 0; y < Y_const; ++y){
// _mm_prefetch((char *)(inputs_pointer + N), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 48), _MM_HINT_T0);
// for (x = 0; x < X_const; ++x)
// {
// __assume_aligned(inputs_pointer, 64);
// filters_pointer++;
// _mm_prefetch((char *)(inputs_pointer + N), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 48), _MM_HINT_T0);
// #if 1 && defined __MIC__
// __m512 v_filters = _mm512_set1_ps(*filters_pointer);
// {
// res_1 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_1);
// res_2 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_2);
// res_3 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_3);
// res_4 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_4);
// }
// #else
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*(filters_pointer));
// #endif
// inputs_pointer += N;
// } // x
// inputs_pointer += XWN;
// } // y
// #if 1 && defined __MIC__
// _mm512_extstore_ps((float *)(convolutions), res_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 16), res_2, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 32), res_3, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 48), res_4, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// #endif
// }
// else{
// #if 1 && defined __MIC__
// __m512 res_1 = _mm512_extload_ps(convolutions, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_2 = _mm512_extload_ps(convolutions + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_3 = _mm512_extload_ps(convolutions + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_4 = _mm512_extload_ps(convolutions + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// #endif
// float *restrict inputs_pointer = INPUTS + ti(c, mx(mn(h-padding_const, H_const-1), 0), mx(mn(w-padding_const, W_const-1), 0), n, H_const, W_const, N);
// int min_x = mx(0, (padding_const - w));
// int max_x = mn(X_const, (W_const + padding_const - w));
// int min_y = mx(0, (padding_const - h));
// int max_y = mn(Y_const, (H_const + padding_const - h));
// #if 1
// filters_pointer += min_y*X_const;
// for (y = min_y; y < max_y; ++y){
// float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// filters_pointer += min_x;
// #pragma unroll (X_const-padding_const)
// #pragma noprefetch
// for (x = min_x; x < max_x; ++x)
// {
// __assume_aligned(inputs_pointer, 64);
// filters_pointer++;
// _mm_prefetch((char *)(inputs_pointer + N), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 48), _MM_HINT_T0);
// #if 1 && defined __MIC__
// __m512 v_filters = _mm512_set1_ps(*filters_pointer);
// {
// res_1 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_1);
// res_2 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_2);
// res_3 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_3);
// res_4 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_4);
// }
// #else
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*(filters_pointer));
// #endif
// inputs_pointer += N;
// } // x
// filters_pointer += (X_const - max_x);
// inputs_pointer = inputs_pointer_y + W_const*N;
// }
// #if 1 && defined __MIC__
// _mm512_extstore_ps((float *)(convolutions), res_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 16), res_2, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 32), res_3, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 48), res_4, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// #endif
// filters_pointer += (Y_const - max_y)*X_const;
// #else
// for (y = 0; y < Y_const; ++y){
// float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// if ((y + h - padding_const >= 0) && (y + h - padding_const < H_const)){ // i.e, are there any elements in this row that overlap with the image?
// for (x = 0; x < X_const; ++x){
// filters_pointer++;
// if ((x + w - padding_const >= 0) && (x + w - padding_const < W_const)){
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*filters_pointer);
// inputs_pointer += N;
// }
// } // x
// inputs_pointer = inputs_pointer_y + W_const*N;
// }
// else filters_pointer += X_const;
// } // y
// #endif
// } // if-else
// } // w
// } // h
// } // c
// // ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// for (h = 0; h < pooled_H_const; h++){
// for (w = 0; w < pooled_W_const; w++){
// int h_output = h*pooling_stride_const;
// int w_output = w*pooling_stride_const;
// // float *restrict outputs_pointer = SCRATCH + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// float *restrict outputs_pointer = SCRATCH + ti(omp_get_thread_num(), h_output, w_output, 0, output_H_const, output_W_const, BLOCK);
// // int *restrict argmaxs_pointer = ARGMAXS + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// int *restrict argmaxs_pointer = ARGMAXS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// float *restrict pooled_outputs_pointer = OUTPUTS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// pooled_outputs_pointer[0 : BLOCK] = -1.0e6;
// // float *restrict argmaxs = SCRATCH + K_const*output_H_const*output_W_const*N + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// int outputs_index = h_output*output_W_const + w_output;
// for (y = 0; y < pooling_radius_const; y++){
// for (x = 0; x < pooling_radius_const; x++){
// if (outputs_pointer[0 : BLOCK] > pooled_outputs_pointer[0 : BLOCK]){
// pooled_outputs_pointer[0 : BLOCK] = outputs_pointer[0 : BLOCK];
// argmaxs_pointer[0 : BLOCK] = outputs_index;
// }
// outputs_index++;
// outputs_pointer += BLOCK;
// }
// outputs_index += output_W_const - pooling_radius_const;
// outputs_pointer += (output_W_const - pooling_radius_const)*BLOCK;
// }
// }
// }
// } //nk
// } // pragma_offload
// }
// Convolution after changing to [N_BLOCK, C, H, W, BLOCK] data structure, before the intrinsics
// int *convolution_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(stride == stride_const);
// assert(padding == padding_const);
// assert(pooling_radius == pooling_radius_const);
// assert(pooling_stride == pooling_stride_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == (output_H_const - pooling_radius_const + 1)/pooling_stride_const);
// assert(pooled_W_const == (output_W_const - pooling_radius_const + 1)/pooling_stride_const);
// #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(OUTPUTS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, k, i, j, h, w, c, y, x;
// int nk, hw, ij, nkhw;
// // computation of constants
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// // SCRATCH[0:K_const*N*output_H_const*output_W_const] = 0.f;
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(nk, hw, ij, n_block, n, k, h, w, c, y, x, i, j) \
// shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// // #pragma vector aligned
// // ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// for (nk = 0; nk < N/N_BLOCK*K_const; nk++){
// n_block = nk / K_const;
// n = n_block*N_BLOCK;
// k = md(nk, K_const);
// SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*N_BLOCK : output_H_const*output_W_const*N_BLOCK] = 0.f;
// // float *restrict convolutions = SCRATCH + ti(k, 0, 0, n, output_H_const, output_W_const, N);
// // for (hw = 0; hw < output_H_const*output_W_const; hw++) convolutions[hw*N : N_BLOCK] = 0.f;
// for (c = 0; c < C_const; c++){
// for (h = 0; h < output_H_const; h++){
// for (w = 0; w < output_W_const; w++){
// // float *restrict convolutions = SCRATCH + ti(k, h, w, n, output_H_const, output_W_const, N);
// float *restrict convolutions = SCRATCH + ti(omp_get_thread_num(), h, w, 0, output_H_const, output_W_const, N_BLOCK);
// int kcyx_shift = (k*C_const + c)*Y_const*X_const - 1; // filters
// float *restrict filters_pointer = FILTERS + kcyx_shift;
// // if we're not on the boundary (i.e., not affected by padding)
// if (w - padding_const >= 0 &&
// h - padding_const >= 0 &&
// output_W_const - 1 - w >= padding_const &&
// output_H_const - 1 - h >= padding_const){
// float *restrict inputs_pointer = INPUTS + ti5(n_block, c, h - padding_const, w - padding_const, 0, C_const, H_const, W_const, N_BLOCK);
// for (y = 0; y < Y_const; ++y){
// for (x = 0; x < X_const; ++x){
// convolutions[0 : N_BLOCK] += inputs_pointer[0 : N_BLOCK] * (*(++filters_pointer));
// inputs_pointer += N_BLOCK;
// } // x
// inputs_pointer += (-X_const + W_const)*N_BLOCK;
// } // y
// }
// else{
// float *restrict inputs_pointer = INPUTS + ti5(n_block, c, mx(mn(h-padding_const, H_const-1), 0), mx(mn(w-padding_const, W_const-1), 0), 0, C_const, H_const, W_const, N_BLOCK);
// for (y = 0; y < Y_const; ++y){
// float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// if ((y + h - padding_const >= 0) && (y + h - padding_const < H_const)){ // i.e, are there any elements in this row that overlap with the image?
// for (x = 0; x < X_const; ++x){
// filters_pointer++;
// if ((x + w - padding_const >= 0) && (x + w - padding_const < W_const)){
// convolutions[0 : N_BLOCK] += inputs_pointer[0 : N_BLOCK] * (*filters_pointer);
// inputs_pointer += N_BLOCK;
// }
// } // x
// inputs_pointer = inputs_pointer_y + W_const*N_BLOCK;
// }
// else filters_pointer += X_const;
// } // y
// } // if-else
// } // w
// } // h
// } // c
// // ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// for (h = 0; h < pooled_H_const; h++){
// for (w = 0; w < pooled_W_const; w++){
// int h_output = h*pooling_stride_const;
// int w_output = w*pooling_stride_const;
// // float *restrict outputs_pointer = SCRATCH + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// float *restrict outputs_pointer = SCRATCH + ti(omp_get_thread_num(), h_output, w_output, 0, output_H_const, output_W_const, N_BLOCK);
// // int *restrict argmaxs_pointer = ARGMAXS + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
// float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
// pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
// // float *restrict argmaxs = SCRATCH + K_const*output_H_const*output_W_const*N + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// int outputs_index = h_output*output_W_const + w_output;
// for (y = 0; y < pooling_radius_const; y++){
// for (x = 0; x < pooling_radius_const; x++){
// if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
// pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
// argmaxs_pointer[0 : N_BLOCK] = outputs_index;
// }
// outputs_index++;
// outputs_pointer += N_BLOCK;
// }
// outputs_index += output_W_const - pooling_radius_const;
// outputs_pointer += (output_W_const - pooling_radius_const)*N_BLOCK;
// }
// }
// }
// } //nk
// } // pragma_offload
// }
// Convolution after changing to [N_BLOCK, C, H, W, BLOCK] data structure, after the intrinsics
// int *convolution_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(stride == stride_const);
// assert(padding == padding_const);
// assert(pooling_radius == pooling_radius_const);
// assert(pooling_stride == pooling_stride_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == (output_H_const - pooling_radius_const + 1)/pooling_stride_const);
// assert(pooled_W_const == (output_W_const - pooling_radius_const + 1)/pooling_stride_const);
// #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(OUTPUTS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, k, i, j, h, w, c, y, x;
// int nk, hw, ij, nkhw;
// // computation of constants
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// // SCRATCH[0:K_const*N*output_H_const*output_W_const] = 0.f;
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(nk, hw, ij, n_block, n, k, h, w, c, y, x, i, j) \
// shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// // ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// for (nk = 0; nk < N/BLOCK*K_const; nk++){
// n_block = nk / K_const;
// n = n_block*BLOCK;
// k = md(nk, K_const);
// SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*BLOCK : output_H_const*output_W_const*BLOCK] = 0.f;
// for (c = 0; c < C_const; c++){
// for (h = 0; h < output_H_const; h++){
// for (w = 0; w < output_W_const; w++){
// // float *restrict convolutions = SCRATCH + ti(k, h, w, n, output_H_const, output_W_const, N);
// float *restrict convolutions = SCRATCH + ti(omp_get_thread_num(), h, w, 0, output_H_const, output_W_const, BLOCK);
// float *restrict convolutions_next = SCRATCH + ti(omp_get_thread_num(), h, w+1, 0, output_H_const, output_W_const, BLOCK);
// __assume_aligned(convolutions, 64);
// _mm_prefetch((char *)(convolutions_next), _MM_HINT_ET0);
// _mm_prefetch((char *)(convolutions_next + 16), _MM_HINT_ET0);
// _mm_prefetch((char *)(convolutions_next + 32), _MM_HINT_ET0);
// _mm_prefetch((char *)(convolutions_next + 48), _MM_HINT_ET0);
// int kcyx_shift = (k*C_const + c)*Y_const*X_const - 1; // filters
// float *restrict filters_pointer = FILTERS + kcyx_shift;
// // if we're not on the boundary (i.e., not affected by padding)
// if (w - padding_const >= 0 &&
// h - padding_const >= 0 &&
// output_W_const - 1 - w >= padding_const &&
// output_H_const - 1 - h >= padding_const){
// #if 1 && defined __MIC__
// __m512 res_1 = _mm512_extload_ps(convolutions, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_2 = _mm512_extload_ps(convolutions + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_3 = _mm512_extload_ps(convolutions + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_4 = _mm512_extload_ps(convolutions + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// #endif
// float *restrict inputs_pointer = INPUTS + ti5(n_block, c, h - padding_const, w - padding_const, 0, C_const, H_const, W_const, BLOCK);
// for (y = 0; y < Y_const; ++y){
// _mm_prefetch((char *)(inputs_pointer + N), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 48), _MM_HINT_T0);
// for (x = 0; x < X_const; ++x)
// {
// __assume_aligned(inputs_pointer, 64);
// filters_pointer++;
// _mm_prefetch((char *)(inputs_pointer + N), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 48), _MM_HINT_T0);
// #if 1 && defined __MIC__
// __m512 v_filters = _mm512_set1_ps(*filters_pointer);
// {
// res_1 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_1);
// res_2 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_2);
// res_3 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_3);
// res_4 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_4);
// }
// #else
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*(filters_pointer));
// #endif
// inputs_pointer += BLOCK;
// } // x
// inputs_pointer += (-X_const + W_const)*BLOCK;
// } // y
// #if 1 && defined __MIC__
// _mm512_extstore_ps((float *)(convolutions), res_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 16), res_2, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 32), res_3, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 48), res_4, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// #endif
// }
// else{
// #if 1 && defined __MIC__
// __m512 res_1 = _mm512_extload_ps(convolutions, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_2 = _mm512_extload_ps(convolutions + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_3 = _mm512_extload_ps(convolutions + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// __m512 res_4 = _mm512_extload_ps(convolutions + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE);
// #endif
// float *restrict inputs_pointer = INPUTS + ti5(n_block, c, mx(mn(h-padding_const, H_const-1), 0), mx(mn(w-padding_const, W_const-1), 0), 0, C_const, H_const, W_const, BLOCK);
// int min_x = mx(0, (padding_const - w));
// int max_x = mn(X_const, (W_const + padding_const - w));
// int min_y = mx(0, (padding_const - h));
// int max_y = mn(Y_const, (H_const + padding_const - h));
// #if 1
// filters_pointer += min_y*X_const;
// for (y = min_y; y < max_y; ++y){
// float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// filters_pointer += min_x;
// #pragma unroll (X_const-padding_const)
// #pragma noprefetch
// for (x = min_x; x < max_x; ++x)
// {
// __assume_aligned(inputs_pointer, 64);
// filters_pointer++;
// _mm_prefetch((char *)(inputs_pointer + N), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 16), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 32), _MM_HINT_T0);
// _mm_prefetch((char *)(inputs_pointer + N + 48), _MM_HINT_T0);
// #if 1 && defined __MIC__
// __m512 v_filters = _mm512_set1_ps(*filters_pointer);
// {
// res_1 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 0, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_1);
// res_2 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 16, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_2);
// res_3 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 32, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_3);
// res_4 = _mm512_fmadd_ps(_mm512_extload_ps(inputs_pointer + 48, _MM_UPCONV_PS_NONE, _MM_BROADCAST32_NONE, _MM_HINT_NONE), v_filters, res_4);
// }
// #else
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*(filters_pointer));
// #endif
// inputs_pointer += BLOCK;
// } // x
// filters_pointer += (X_const - max_x);
// inputs_pointer = inputs_pointer_y + W_const*BLOCK;
// }
// #if 1 && defined __MIC__
// _mm512_extstore_ps((float *)(convolutions), res_1, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 16), res_2, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 32), res_3, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// _mm512_extstore_ps((float *)(convolutions + 48), res_4, _MM_DOWNCONV_PS_NONE, _MM_HINT_NONE);
// #endif
// filters_pointer += (Y_const - max_y)*X_const;
// #else
// for (y = 0; y < Y_const; ++y){
// float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// if ((y + h - padding_const >= 0) && (y + h - padding_const < H_const)){ // i.e, are there any elements in this row that overlap with the image?
// for (x = 0; x < X_const; ++x){
// filters_pointer++;
// if ((x + w - padding_const >= 0) && (x + w - padding_const < W_const)){
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*filters_pointer);
// inputs_pointer += BLOCK;
// }
// } // x
// inputs_pointer = inputs_pointer_y + W_const*BLOCK;
// }
// else filters_pointer += X_const;
// } // y
// #endif
// } // if-else
// } // w
// } // h
// } // c
// // ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// for (h = 0; h < pooled_H_const; h++){
// for (w = 0; w < pooled_W_const; w++){
// int h_output = h*pooling_stride_const;
// int w_output = w*pooling_stride_const;
// // float *restrict outputs_pointer = SCRATCH + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// float *restrict outputs_pointer = SCRATCH + ti(omp_get_thread_num(), h_output, w_output, 0, output_H_const, output_W_const, BLOCK);
// // int *restrict argmaxs_pointer = ARGMAXS + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k, h, w, 0, K_const, pooled_H_const, pooled_W_const, BLOCK);
// float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k, h, w, 0, K_const, pooled_H_const, pooled_W_const, BLOCK);
// pooled_outputs_pointer[0 : BLOCK] = -1.0e6;
// // float *restrict argmaxs = SCRATCH + K_const*output_H_const*output_W_const*N + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// int outputs_index = h_output*output_W_const + w_output;
// for (y = 0; y < pooling_radius_const; y++){
// for (x = 0; x < pooling_radius_const; x++){
// if (outputs_pointer[0 : BLOCK] > pooled_outputs_pointer[0 : BLOCK]){
// pooled_outputs_pointer[0 : BLOCK] = outputs_pointer[0 : BLOCK];
// argmaxs_pointer[0 : BLOCK] = outputs_index;
// }
// outputs_index++;
// outputs_pointer += BLOCK;
// }
// outputs_index += output_W_const - pooling_radius_const;
// outputs_pointer += (output_W_const - pooling_radius_const)*BLOCK;
// }
// }
// }
// } //nk
// } // pragma_offload
// }
// convolution after interleaving FILTERS and INPUTS and OUTPUTS
// int *convolution_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(stride == stride_const);
// assert(padding == padding_const);
// assert(pooling_radius == pooling_radius_const);
// assert(pooling_stride == pooling_stride_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == (output_H_const - pooling_radius_const + 1)/pooling_stride_const);
// assert(pooled_W_const == (output_W_const - pooling_radius_const + 1)/pooling_stride_const);
// #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(OUTPUTS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, k_block, k, i, j, h, w, c, y, x;
// int nk, hw, ij, nkhw;
// // computation of constants
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(nk, hw, ij, n_block, n, k, k_block, h, w, c, y, x, i, j) \
// shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// // #pragma vector aligned
// // ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// for (nk = 0; nk < N/N_BLOCK*K_const/K_BLOCK; nk++){
// n_block = nk / (K_const/K_BLOCK);
// n = n_block*N_BLOCK;
// k_block = md(nk, K_const/K_BLOCK);
// k = k_block*K_BLOCK;
// SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*N_BLOCK*K_BLOCK : output_H_const*output_W_const*N_BLOCK*K_BLOCK] = 0.f;
// for (c = 0; c < C_const; c++){
// for (h = 0; h < output_H_const; h++){
// for (w = 0; w < output_W_const; w++){
// float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
// float *restrict filters_pointer = FILTERS + ti5(k_block, c, 0, 0, 0, C_const, Y_const, X_const, K_BLOCK); //(k*C_const + c)*Y_const*X_const - 1;
// // if we're not on the boundary (i.e., not affected by padding)
// if (w - padding_const >= 0 &&
// h - padding_const >= 0 &&
// output_W_const - 1 - w >= padding_const &&
// output_H_const - 1 - h >= padding_const){
// float *restrict inputs_pointer = INPUTS + ti5(n_block, c, h - padding_const, w - padding_const, 0, C_const, H_const, W_const, N_BLOCK);
// for (y = 0; y < Y_const; ++y){
// for (x = 0; x < X_const; ++x){
// for (int kk = 0; kk < K_BLOCK; kk++){
// convolutions[kk*N_BLOCK : N_BLOCK] += inputs_pointer[0 : N_BLOCK] * (*filters_pointer);
// filters_pointer++;
// }
// inputs_pointer += N_BLOCK;
// } // x
// inputs_pointer += (-X_const + W_const)*N_BLOCK;
// } // y
// }
// else{
// float *restrict inputs_pointer = INPUTS + ti5(n_block, c, mx(mn(h-padding_const, H_const-1), 0), mx(mn(w-padding_const, W_const-1), 0), 0, C_const, H_const, W_const, N_BLOCK);
// for (y = 0; y < Y_const; ++y){
// float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// if ((y + h - padding_const >= 0) && (y + h - padding_const < H_const)){ // i.e, are there any elements in this row that overlap with the image?
// for (x = 0; x < X_const; ++x){
// if ((x + w - padding_const >= 0) && (x + w - padding_const < W_const)){
// for (int kk = 0; kk < K_BLOCK; kk++){
// convolutions[kk*N_BLOCK : N_BLOCK] += inputs_pointer[0 : N_BLOCK] * (*filters_pointer);
// filters_pointer++;
// }
// inputs_pointer += N_BLOCK;
// }
// else filters_pointer += K_BLOCK;
// } // x
// inputs_pointer = inputs_pointer_y + W_const*N_BLOCK;
// }
// else filters_pointer += X_const*K_BLOCK;
// } // y
// } // if-else
// } // w
// } // h
// } // c
// // ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// for (h = 0; h < pooled_H_const; h++){
// for (w = 0; w < pooled_W_const; w++){
// int h_output = h*pooling_stride_const;
// int w_output = w*pooling_stride_const;
// for (int kk = 0; kk < K_BLOCK; kk++){
// float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
// int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
// float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
// pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
// // float *restrict argmaxs = SCRATCH + K_const*output_H_const*output_W_const*N + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// int outputs_index = h_output*output_W_const + w_output;
// for (y = 0; y < pooling_radius_const; y++){
// for (x = 0; x < pooling_radius_const; x++){
// if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
// pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
// argmaxs_pointer[0 : N_BLOCK] = outputs_index;
// }
// outputs_index++;
// outputs_pointer += K_BLOCK*N_BLOCK;
// }
// outputs_index += output_W_const - pooling_radius_const;
// outputs_pointer += (output_W_const - pooling_radius_const)*K_BLOCK*N_BLOCK;
// }
// }
// }
// }
// } //nk
// } // pragma_offload
// }
// convolution after interleaving N and K and blocking C, before intrinsics
// int *convolution_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(stride == stride_const);
// assert(padding == padding_const);
// assert(pooling_radius == pooling_radius_const);
// assert(pooling_stride == pooling_stride_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == ceil((output_H_const - pooling_radius_const + 1.f)/pooling_stride_const));
// assert(pooled_W_const == ceil((output_W_const - pooling_radius_const + 1.f)/pooling_stride_const));
// #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(OUTPUTS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, k_block, k, i, j, h, w, c, c_block, y, x;
// int nk, hw, ij, nkhw;
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(nk, hw, ij, n_block, n, k, k_block, h, w, c, c_block, y, x, i, j) \
// shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// // ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// for (nk = 0; nk < N/N_BLOCK*K_const/K_BLOCK; nk++){
// n_block = nk / (K_const/K_BLOCK);
// n = n_block*N_BLOCK;
// k_block = md(nk, K_const/K_BLOCK);
// k = k_block*K_BLOCK;
// SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*N_BLOCK*K_BLOCK : output_H_const*output_W_const*N_BLOCK*K_BLOCK] = 0.f;
// for (c_block = 0; c_block < C_const/C_BLOCK; c_block++){
// c = c_block*C_BLOCK;
// for (h = 0; h < output_H_const; h++){
// for (w = 0; w < output_W_const; w++){
// float *restrict convolutions = SCRATCH + ti5(omp_get_thread_num(), h, w, 0, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
// for (y = 0; y < Y_const; ++y){
// for (x = 0; x < X_const; ++x){
// if ((h + y - padding_const >= 0) && (h + y - padding_const < H_const) && (w + x - padding_const >= 0) && (w + x - padding_const < W_const)){
// float *restrict filters_pointer = FILTERS + ti5(k_block, c, y, x, 0, C_const, Y_const, X_const, K_BLOCK);
// float *restrict inputs_pointer = INPUTS + ti5(n_block, c, h + y - padding_const, w + x - padding_const, 0, C_const, H_const, W_const, N_BLOCK);
// for (int cc = 0; cc < C_BLOCK; cc++){
// for (int kk = 0; kk < K_BLOCK; kk++){
// convolutions[kk*N_BLOCK : N_BLOCK] += inputs_pointer[0 : N_BLOCK] * (*filters_pointer);
// filters_pointer++;
// } //kk
// filters_pointer += Y_const*X_const*K_BLOCK - K_BLOCK;
// inputs_pointer += H_const*W_const*N_BLOCK;
// } // cc
// } // if
// } // x
// } // y
// } // w
// } // h
// } // c_block
// // ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// for (h = 0; h < pooled_H_const; h++){
// for (w = 0; w < pooled_W_const; w++){
// int h_output = h*pooling_stride_const;
// int w_output = w*pooling_stride_const;
// int window_width = pooling_radius_const - mx(w_output + pooling_radius_const - output_W_const, 0);
// int window_height = pooling_radius_const - mx(h_output + pooling_radius_const - output_H_const, 0);
// for (int kk = 0; kk < K_BLOCK; kk++){
// float *restrict outputs_pointer = SCRATCH + ti5(omp_get_thread_num(), h_output, w_output, kk, 0, output_H_const, output_W_const, K_BLOCK, N_BLOCK);
// int *restrict argmaxs_pointer = ARGMAXS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
// float *restrict pooled_outputs_pointer = OUTPUTS + ti5(n_block, k + kk, h, w, 0, K_const, pooled_H_const, pooled_W_const, N_BLOCK);
// pooled_outputs_pointer[0 : N_BLOCK] = -1.0e6;
// int outputs_index = h_output*output_W_const + w_output;
// for (y = 0; y < window_height; y++){
// for (x = 0; x < window_width; x++){
// if (outputs_pointer[0 : N_BLOCK] > pooled_outputs_pointer[0 : N_BLOCK]){
// pooled_outputs_pointer[0 : N_BLOCK] = outputs_pointer[0 : N_BLOCK];
// argmaxs_pointer[0 : N_BLOCK] = outputs_index;
// }
// outputs_index++;
// outputs_pointer += K_BLOCK*N_BLOCK;
// }
// outputs_index += output_W_const - window_width;
// outputs_pointer += (output_W_const - window_width)*K_BLOCK*N_BLOCK;
// }
// }
// }
// }
// } //nk
// } // pragma_offload
// }
// convolution for [C, H, W, N] data structure
// int *convolution_layer1(int N, int C, int H, int W, float *restrict INPUTS, int K, int Y, int X, float *restrict FILTERS, float *restrict OUTPUTS, int *restrict ARGMAXS, int stride, int padding, int pooling_radius, int pooling_stride, int offloaded, float *SCRATCH){
// assert(C == C_const);
// assert(H == H_const);
// assert(W == W_const);
// assert(K == K_const);
// assert(stride == stride_const);
// assert(padding == padding_const);
// assert(pooling_radius == pooling_radius_const);
// assert(pooling_stride == pooling_stride_const);
// assert(X == X_const);
// assert(Y == Y_const);
// assert(output_H_const == (H_const + 2*padding_const - Y_const + 1)/stride_const);
// assert(output_W_const == (W_const + 2*padding_const - X_const + 1)/stride_const);
// assert(pooled_H_const == (output_H_const - pooling_radius_const + 1)/pooling_stride_const);
// assert(pooled_W_const == (output_W_const - pooling_radius_const + 1)/pooling_stride_const);
// #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(OUTPUTS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int n_block, n, k, i, j, h, w, c, y, x;
// int nk, hw, ij, nkhw;
// // computation of constants
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// // SCRATCH[0:K_const*N*output_H_const*output_W_const] = 0.f;
// #pragma omp parallel for \
// schedule(dynamic) \
// default(none) \
// private(nk, hw, ij, n_block, n, k, h, w, c, y, x, i, j) \
// shared(N, INPUTS, OUTPUTS, FILTERS, ARGMAXS, SCRATCH, XWN, HYWN)
// // #pragma vector aligned
// // ~=~=~=~=~=~=~=~= CONVOLUTION ~=~=~=~=~=~=~=~=
// for (nk = 0; nk < N/BLOCK*K_const; nk++){
// n_block = nk / K_const;
// n = n_block*BLOCK;
// k = md(nk, K_const);
// SCRATCH[omp_get_thread_num()*output_H_const*output_W_const*BLOCK : output_H_const*output_W_const*BLOCK] = 0.f;
// // float *restrict convolutions = SCRATCH + ti(k, 0, 0, n, output_H_const, output_W_const, N);
// // for (hw = 0; hw < output_H_const*output_W_const; hw++) convolutions[hw*N : BLOCK] = 0.f;
// for (c = 0; c < C_const; c++){
// for (h = 0; h < output_H_const; h++){
// for (w = 0; w < output_W_const; w++){
// // float *restrict convolutions = SCRATCH + ti(k, h, w, n, output_H_const, output_W_const, N);
// float *restrict convolutions = SCRATCH + ti(omp_get_thread_num(), h, w, 0, output_H_const, output_W_const, BLOCK);
// int kcyx_shift = (k*C_const + c)*Y_const*X_const - 1; // filters
// float *restrict filters_pointer = FILTERS + kcyx_shift;
// // if we're not on boundary (i.e not affected by padding)
// if (w - padding_const >= 0 &&
// h - padding_const >= 0 &&
// output_W_const - 1 - w >= padding_const &&
// output_H_const - 1 - h >= padding_const){
// float *restrict inputs_pointer = INPUTS + ti(c, h - padding_const, w - padding_const, n, H_const, W_const, N);
// for (y = 0; y < Y_const; ++y){
// for (x = 0; x < X_const; ++x){
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*(++filters_pointer));
// inputs_pointer += N;
// } // x
// inputs_pointer += XWN;
// } // y
// }
// else{
// float *restrict inputs_pointer = INPUTS + ti(c, mx(mn(h-padding_const, H_const-1), 0), mx(mn(w-padding_const, W_const-1), 0), n, H_const, W_const, N);
// for (y = 0; y < Y_const; ++y){
// float *restrict inputs_pointer_y = inputs_pointer; // start-of-line pointer
// if ((y + h - padding_const >= 0) && (y + h - padding_const < H_const)){ // i.e, are there any elements in this row that overlap with the image?
// for (x = 0; x < X_const; ++x){
// filters_pointer++;
// if ((x + w - padding_const >= 0) && (x + w - padding_const < W_const)){
// convolutions[0 : BLOCK] += inputs_pointer[0 : BLOCK] * (*filters_pointer);
// inputs_pointer += N;
// }
// } // x
// inputs_pointer = inputs_pointer_y + W_const*N;
// }
// else filters_pointer += X_const;
// } // y
// } // if-else
// } // w
// } // h
// } // c
// // ~=~=~=~=~=~=~=~= POOLING ~=~=~=~=~=~=~=~=
// for (h = 0; h < pooled_H_const; h++){
// for (w = 0; w < pooled_W_const; w++){
// int h_output = h*pooling_stride_const;
// int w_output = w*pooling_stride_const;
// // float *restrict outputs_pointer = SCRATCH + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// float *restrict outputs_pointer = SCRATCH + ti(omp_get_thread_num(), h_output, w_output, 0, output_H_const, output_W_const, BLOCK);
// // int *restrict argmaxs_pointer = ARGMAXS + ti(k, h_output, w_output, n, output_H_const, output_W_const, N);
// int *restrict argmaxs_pointer = ARGMAXS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// float *restrict pooled_outputs_pointer = OUTPUTS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// pooled_outputs_pointer[0 : BLOCK] = -1.0e6;
// // float *restrict argmaxs = SCRATCH + K_const*output_H_const*output_W_const*N + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// int outputs_index = h_output*output_W_const + w_output;
// for (y = 0; y < pooling_radius_const; y++){
// for (x = 0; x < pooling_radius_const; x++){
// if (outputs_pointer[0 : BLOCK] > pooled_outputs_pointer[0 : BLOCK]){
// pooled_outputs_pointer[0 : BLOCK] = outputs_pointer[0 : BLOCK];
// argmaxs_pointer[0 : BLOCK] = outputs_index;
// }
// outputs_index++;
// outputs_pointer += BLOCK;
// }
// outputs_index += output_W_const - pooling_radius_const;
// outputs_pointer += (output_W_const - pooling_radius_const)*BLOCK;
// }
// }
// }
// } //nk
// } // pragma_offload
// }
// gradient before any optimization
// void convolution_gradient_layer1(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(D_POOLED_OUTPUTS:length(0) REUSE) \
// in(D_FILTERS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int khw, h, w, k, c, lin_index, y, x, h_arg, w_arg, n, tmp;
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// #pragma omp parallel for \
// default(none) \
// schedule(dynamic) \
// private(khw, h, w, k, c, lin_index, y, x, h_arg, w_arg, n, tmp) \
// shared(N, XWN, HYWN, INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH)
// for (khw = 0; khw < K_const*pooled_H_const*pooled_W_const; khw++){
// k = khw / (pooled_H_const*pooled_W_const);
// h = md(khw, pooled_H_const*pooled_W_const) / pooled_W_const;
// w = md(khw, pooled_W_const);
// float *d_filters_tmp = SCRATCH + omp_get_thread_num()*C_const*Y_const*X_const;
// d_filters_tmp[0 : C_const*Y_const*X_const] = 0.f;
// float *restrict d_filters_pointer = d_filters_tmp; // D_FILTERS + k*C_const*Y_const*X_const;
// for (n = 0; n < N; n++){
// float *restrict d_pooled_outputs_pointer = D_POOLED_OUTPUTS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// int *restrict argmaxs_pointer = ARGMAXS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// lin_index = *argmaxs_pointer;
// h_arg = lin_index/output_W_const;
// w_arg = lin_index - h_arg*output_W_const;
// if ((w_arg - padding_const >= 0) &&
// (h_arg - padding_const >= 0) &&
// (output_W_const - 1 - w_arg >= padding_const) &&
// (output_H_const - 1 - h_arg >= padding_const)){
// float *restrict inputs_pointer = INPUTS + ti(0, h_arg - padding_const, w_arg - padding_const, n, H_const, W_const, N);
// for (c = 0; c < C_const; ++c){
// for (y = 0; y < Y_const; ++y){
// for (x = 0; x < X_const; ++x){
// *d_filters_pointer += (*inputs_pointer) * (*d_pooled_outputs_pointer);
// // *d_inputs_pointer += (*d_pooled_outputs_pointer) * (*filters_pointer);
// d_filters_pointer++;
// inputs_pointer += N;
// } // x
// inputs_pointer += XWN;
// } // y
// inputs_pointer += HYWN;
// } //c
// }
// else{
// float *restrict inputs_pointer = INPUTS + ti(0, mx(mn(h_arg-padding_const, H_const-1), 0), mx(mn(w_arg-padding_const, W_const-1), 0), n, H_const, W_const, N);
// for (c = 0; c < C_const; ++c){
// for (y = 0; y < Y_const; ++y){
// float *restrict inputs_pointer_y = inputs_pointer;
// if ((y + h_arg - padding_const >= 0) && (y + h_arg - padding_const < H_const)){
// for (x = 0; x < X_const; ++x){
// if ((x + w_arg - padding_const >= 0) && (x + w_arg - padding_const < W_const)){
// *d_filters_pointer += (*d_pooled_outputs_pointer) * (*inputs_pointer);
// inputs_pointer += N;
// }
// d_filters_pointer++;
// } // x
// }
// else d_filters_pointer += X_const; // advance pointer without going into loop
// inputs_pointer = inputs_pointer_y + W_const*N;
// } // y
// inputs_pointer += HYWN;
// } // c
// }
// d_filters_pointer = D_FILTERS + k*C_const*Y_const*X_const;
// d_filters_pointer[0 : C_const*Y_const*X_const] += d_filters_tmp[0 : C_const*Y_const*X_const];
// } // n
// } // khw
// } // pragma offload
// }
// void convolution_gradient_layer1(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(D_POOLED_OUTPUTS:length(0) REUSE) \
// in(D_FILTERS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int nkhw, n_block, h, w, k, c, lin_index, y, x, h_arg, w_arg, n, tmp;
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// SCRATCH[0 : K_const*output_H_const*output_W_const*N] = 0.f;
// #pragma omp parallel for \
// default(none) \
// schedule(dynamic) \
// private(n) \
// shared(N, ARGMAXS, SCRATCH)
// for (n = 0; n < K_const*pooled_H_const*pooled_W_const*N; n++){
// int argmax = ARGMAXS[n];
// SCRATCH[argmax] += 1.f;
// }
// #pragma omp parallel for \
// default(none) \
// schedule(dynamic) \
// private(nkhw, n_block, h, w, k, c, lin_index, y, x, h_arg, w_arg, n, tmp) \
// shared(N, XWN, HYWN, INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH)
// for (nkhw = 0; nkhw < K_const*output_H_const*output_W_const*N/BLOCK; nkhw++){
// n_block = nkhw / (K_const*output_H_const*output_W_const);
// n = n_block*BLOCK;
// k = md(nkhw, K_const*output_H_const*output_W_const) / (output_H_const*output_W_const);
// h = md(nkhw, output_H_const*output_W_const) / output_W_const;
// w = md(nkhw, output_W_const);
// float *restrict d_filters_pointer = D_FILTERS + k*C_const*Y_const*X_const;
// int h_pool = h / pooling_stride_const;
// int w_pool = w / pooling_stride_const;
// float *restrict d_pooled_outputs_pointer = D_POOLED_OUTPUTS + ti(k, h_pool, w_pool, n, pooled_H_const, pooled_W_const, N);
// // need to modify this to real argmaxs array, which is over the outputs and for each element has the count of the number of times it's the argmax
// float *restrict argmaxs_pointer = SCRATCH + ti(k, h, w, n, output_H_const, output_W_const, N);
// if ((w - padding_const >= 0) &&
// (h - padding_const >= 0) &&
// (output_W_const - 1 - w >= padding_const) &&
// (output_H_const - 1 - h >= padding_const)){
// float *restrict inputs_pointer = INPUTS + ti(0, h, w, n, H_const, W_const, N);
// for (c = 0; c < C_const; c++){
// for (y = 0; y < Y_const; y++){
// for (x = 0; x < X_const; x++){
// *d_filters_pointer += __sec_reduce_add(inputs_pointer[0 : BLOCK] * argmaxs_pointer[0 : BLOCK] * d_pooled_outputs_pointer[0 : BLOCK]);
// d_filters_pointer++;
// inputs_pointer += N;
// } // x
// inputs_pointer += XWN;
// } // y
// inputs_pointer += HYWN;
// }
// }
// // else{
// // float *restrict inputs_pointer = INPUTS + cHWn - 1 + mx(mn(h_arg-padding_const, H_const-1), 0)*W_const + mx(mn(w_arg-padding_const, W_const-1), 0);
// // for (y = 0; y < Y_const; ++y){
// // float *restrict inputs_pointer_ncyX = inputs_pointer; // start-of-line pointer
// // if ((y + h_arg - padding_const >= 0) && (y + h_arg - padding_const < H_const)){
// // for (x = 0; x < X_const; ++x){
// // d_filters_pointer++;
// // if ((x + w_arg - padding_const >= 0) && (x + w_arg - padding_const < W_const))
// // *d_filters_pointer += (*d_pooled_outputs_pointer) * (*(++inputs_pointer));
// // } // x
// // }
// // else d_filters_pointer += X_const; // advance pointer without going into loop
// // inputs_pointer = inputs_pointer_ncyX + W_const;
// // } // y
// // }
// } // nkhw
// }
// }
// // gradient for inputs data structure [N, H, W, C]
// void convolution_gradient_layer1(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, int padding, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, float *D_INPUTS, float *D_FILTERS, float *SCRATCH){
// #pragma offload target(mic:MIC_DEV) \
// in(INPUTS:length(0) REUSE) \
// in(FILTERS:length(0) REUSE) \
// in(ARGMAXS:length(0) REUSE) \
// in(D_POOLED_OUTPUTS:length(0) REUSE) \
// in(D_FILTERS:length(0) REUSE) \
// in(SCRATCH:length(0) REUSE)
// {
// int khw, h, w, k, c, lin_index, y, x, h_arg, w_arg, n, tmp;
// int XWN = (-X_const + W_const)*N,
// HYWN = (H_const-Y_const)*W_const*N;
// #pragma omp parallel for \
// default(none) \
// schedule(dynamic) \
// private(khw, h, w, k, c, lin_index, y, x, h_arg, w_arg, n, tmp) \
// shared(N, XWN, HYWN, INPUTS, ARGMAXS, FILTERS, D_POOLED_OUTPUTS, D_FILTERS, SCRATCH)
// for (khw = 0; khw < K_const*pooled_H_const*pooled_W_const; khw++){
// k = khw / (pooled_H_const*pooled_W_const);
// h = md(khw, pooled_H_const*pooled_W_const) / pooled_W_const;
// w = md(khw, pooled_W_const);
// float *restrict d_filters_tmp = SCRATCH + omp_get_thread_num()*Y_const*X_const*C_const;
// d_filters_tmp[0 : C_const*Y_const*X_const] = 0.f;
// float *restrict d_filters_pointer = d_filters_tmp; // D_FILTERS + k*C_const*Y_const*X_const;
// for (n = 0; n < N; n++){
// float *restrict d_pooled_outputs_pointer = D_POOLED_OUTPUTS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// int *restrict argmaxs_pointer = ARGMAXS + ti(k, h, w, n, pooled_H_const, pooled_W_const, N);
// lin_index = *argmaxs_pointer;
// h_arg = lin_index/output_W_const;
// w_arg = lin_index - h_arg*output_W_const;
// // float *restrict inputs_pointer = INPUTS + ti(n, h_arg - padding_const, w_arg - padding_const, 0, H_const, W_const, C_const);
// float *restrict inputs_pointer = INPUTS + ti(n, mx(mn(h_arg-padding_const, H_const-1), 0), mx(mn(w_arg-padding_const, W_const-1), 0), 0, H_const, W_const, C_const);
// const int x_invalid_left = mx(padding_const - w_arg, 0);
// const int x_invalid_right = mx(w_arg - padding_const + X_const - W_const, 0);
// const int x_len = X_const - x_invalid_left - x_invalid_right;
// for (y = 0; y < Y_const; ++y){
// if ((y + h_arg - padding_const >= 0) && (y + h_arg - padding_const < H_const)){
// d_filters_pointer[x_invalid_left*C_const : x_len*C_const] += inputs_pointer[0 : x_len*C_const] * (*d_pooled_outputs_pointer);
// d_filters_pointer += X_const*C_const;
// inputs_pointer += W_const*C_const;
// } // if
// } // y
// d_filters_pointer = D_FILTERS + k*Y_const*X_const*C_const;
// d_filters_pointer[0 : Y_const*X_const*C_const] += d_filters_tmp[0 : Y_const*X_const*C_const];
// } // n
// } // khw
// } // pragma offload
// }
// void *initialize_locks(){
// void *writelock[C_BLOCK_GRAD*Y_BLOCK_GRAD*16];
// #pragma offload target(mic:MIC_DEV) \
// in(writelock:length(0))
// {
// omp_lock_t *writelock_casted = (omp_lock_t *) writelock;
// for(int i = 0; i < C_BLOCK_GRAD*Y_BLOCK_GRAD; i++)
// omp_init_lock(&writelock_casted[16*i]);
// }
// return (void *)writelock;
// }
// computes FFT/iFFT of N contiguous HxW maps
// Batched 2D real<->complex FFT over N contiguous HxW maps, offloaded to the
// MIC coprocessor via MKL DFTI.  IS_FORWARD == 1: real SPATIALS -> packed
// half-spectrum FREQUENCIES, each H x (W/2 + 1) complex values
// (conjugate-even storage); otherwise the inverse transform.  Both
// directions are scaled by 1/sqrt(H*W), so forward then backward
// reproduces the input.
// The batch is split evenly over NUM_THREADS OpenMP threads, each owning a
// private DFTI descriptor for N_THREAD maps; the N_REMAINING maps left
// over are transformed serially afterwards with the same setup.
// NOTE(review): every DFTI `status` return is assigned but never checked,
// so descriptor/compute errors pass silently -- confirm this is intended.
void fft(int N, int H, int W, int IS_FORWARD, float *restrict SPATIALS, float complex *restrict FREQUENCIES){
#pragma offload target(mic:MIC_DEV) \
in(SPATIALS:length(0) REUSE) \
in(FREQUENCIES:length(0) REUSE)
{
int NUM_THREADS = 128; // NOTE(review): hard-coded; if N < 128 then N_THREAD == 0 and each thread requests 0 transforms -- confirm MKL tolerates that (all work would fall to the remainder pass)
int N_THREAD = N/NUM_THREADS; // maps per thread in the parallel pass
int N_ALIGNED = (N/NUM_THREADS) * NUM_THREADS; // maps covered by the parallel pass
int N_REMAINING = N - N_ALIGNED; // leftover maps, handled serially below
int thread;
#pragma omp parallel for \
private(thread) \
shared(N, H, W, IS_FORWARD, SPATIALS, FREQUENCIES, NUM_THREADS, N_THREAD)
for (thread = 0; thread < NUM_THREADS; thread++){
// each thread transforms its own contiguous slice of the batch
float *restrict spatials_pointer = SPATIALS + thread*N_THREAD*H*W;
float complex *restrict frequencies_pointer = FREQUENCIES + thread*N_THREAD*H*(W/2 + 1);
DFTI_DESCRIPTOR_HANDLE hand; MKL_LONG status;
MKL_LONG transform_dimensions[] = {H, W};
MKL_LONG spatial_strides[] = {0, W, 1}; // spatial shape: HxW
MKL_LONG frequency_strides[] = {0, W/2 + 1, 1}; // frequency shape: H*(floor(W/2) + 1)
status = DftiCreateDescriptor(&hand, DFTI_SINGLE, DFTI_REAL, 2, transform_dimensions);
status = DftiSetValue(hand, DFTI_PLACEMENT, DFTI_NOT_INPLACE);
// unitary scaling: the same 1/sqrt(H*W) factor in both directions
float scale = pow((float) H*W, -0.5);
status = DftiSetValue(hand, DFTI_FORWARD_SCALE, scale);
status = DftiSetValue(hand, DFTI_BACKWARD_SCALE, scale);
status = DftiSetValue(hand, DFTI_NUMBER_OF_TRANSFORMS, N_THREAD);
status = DftiSetValue(hand, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX);
// strides/distances depend on direction: input is the spatial layout for
// the forward transform, the frequency layout for the backward one
if (IS_FORWARD == 1){
status = DftiSetValue(hand, DFTI_INPUT_STRIDES, spatial_strides);
status = DftiSetValue(hand, DFTI_OUTPUT_STRIDES, frequency_strides);
status = DftiSetValue(hand, DFTI_INPUT_DISTANCE, H*W);
status = DftiSetValue(hand, DFTI_OUTPUT_DISTANCE, H*(W/2 + 1));
}
else{
status = DftiSetValue(hand, DFTI_INPUT_STRIDES, frequency_strides);
status = DftiSetValue(hand, DFTI_OUTPUT_STRIDES, spatial_strides);
status = DftiSetValue(hand, DFTI_INPUT_DISTANCE, H*(W/2 + 1));
status = DftiSetValue(hand, DFTI_OUTPUT_DISTANCE, H*W);
}
status = DftiCommitDescriptor(hand);
if (IS_FORWARD == 1) status = DftiComputeForward(hand, spatials_pointer, frequencies_pointer);
else status = DftiComputeBackward(hand, frequencies_pointer, spatials_pointer);
status = DftiFreeDescriptor(&hand);
}
// transform remaining ones
// identical descriptor setup, but for the N_REMAINING maps that did not
// divide evenly across the threads; executed serially
if (N_REMAINING > 0){
float *restrict spatials_pointer = SPATIALS + N_ALIGNED*H*W;
float complex *restrict frequencies_pointer = FREQUENCIES + N_ALIGNED*H*(W/2 + 1);
DFTI_DESCRIPTOR_HANDLE hand; MKL_LONG status;
MKL_LONG transform_dimensions[] = {H, W};
MKL_LONG spatial_strides[] = {0, W, 1}; // spatial shape: HxW
MKL_LONG frequency_strides[] = {0, W/2 + 1, 1}; // frequency shape: H*(floor(W/2) + 1)
status = DftiCreateDescriptor(&hand, DFTI_SINGLE, DFTI_REAL, 2, transform_dimensions);
status = DftiSetValue(hand, DFTI_PLACEMENT, DFTI_NOT_INPLACE);
float scale = pow((float) H*W, -0.5);
status = DftiSetValue(hand, DFTI_FORWARD_SCALE, scale);
status = DftiSetValue(hand, DFTI_BACKWARD_SCALE, scale);
status = DftiSetValue(hand, DFTI_NUMBER_OF_TRANSFORMS, N_REMAINING);
status = DftiSetValue(hand, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX);
if (IS_FORWARD == 1){
status = DftiSetValue(hand, DFTI_INPUT_STRIDES, spatial_strides);
status = DftiSetValue(hand, DFTI_OUTPUT_STRIDES, frequency_strides);
status = DftiSetValue(hand, DFTI_INPUT_DISTANCE, H*W);
status = DftiSetValue(hand, DFTI_OUTPUT_DISTANCE, H*(W/2 + 1));
}
else{
status = DftiSetValue(hand, DFTI_INPUT_STRIDES, frequency_strides);
status = DftiSetValue(hand, DFTI_OUTPUT_STRIDES, spatial_strides);
status = DftiSetValue(hand, DFTI_INPUT_DISTANCE, H*(W/2 + 1));
status = DftiSetValue(hand, DFTI_OUTPUT_DISTANCE, H*W);
}
status = DftiCommitDescriptor(hand);
if (IS_FORWARD == 1) status = DftiComputeForward(hand, spatials_pointer, frequencies_pointer);
else status = DftiComputeBackward(hand, frequencies_pointer, spatials_pointer);
status = DftiFreeDescriptor(&hand);
}
}
}
// void fft(int N, int H, int W, int IS_FORWARD, float *restrict SPATIALS, float complex *restrict FREQUENCIES){
// #pragma offload target(mic:MIC_DEV) \
// in(SPATIALS:length(0) REUSE) \
// in(FREQUENCIES:length(0) REUSE)
// {
// DFTI_DESCRIPTOR_HANDLE hand; MKL_LONG status;
// MKL_LONG transform_dimensions[] = {H, W};
// MKL_LONG spatial_strides[] = {0, W, 1}; // spatial shape: HxW
// MKL_LONG frequency_strides[] = {0, W/2 + 1, 1}; // frequency shape: H*(floor(W/2) + 1)
// // printf("Starting.\n");
// status = DftiCreateDescriptor(&hand, DFTI_SINGLE, DFTI_REAL, 2, transform_dimensions);
// // printf("Create %d\n", status);
// status = DftiSetValue(hand, DFTI_PLACEMENT, DFTI_NOT_INPLACE);
// // printf("place %d\n", status);
// float scale = pow((float) H*W, -0.5);
// status = DftiSetValue(hand, DFTI_FORWARD_SCALE, scale);
// // printf("forward %d\n", status);
// status = DftiSetValue(hand, DFTI_BACKWARD_SCALE, scale);
// // printf("backward %d\n", status);
// status = DftiSetValue(hand, DFTI_NUMBER_OF_TRANSFORMS, N);
// // printf("num %d\n", status);
// status = DftiSetValue(hand, DFTI_THREAD_LIMIT, 236);
// status = DftiSetValue(hand, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX);
// // printf("storage %d\n", status);
// if (IS_FORWARD == 1){
// status = DftiSetValue(hand, DFTI_INPUT_STRIDES, spatial_strides);
// // printf("in strides %d\n", status);
// status = DftiSetValue(hand, DFTI_OUTPUT_STRIDES, frequency_strides);
// // printf("out strides %d\n", status);
// status = DftiSetValue(hand, DFTI_INPUT_DISTANCE, H*W);
// // printf("input %d\n", status);
// status = DftiSetValue(hand, DFTI_OUTPUT_DISTANCE, H*(W/2 + 1));
// // printf("output %d\n", status);
// }
// else{
// status = DftiSetValue(hand, DFTI_INPUT_STRIDES, frequency_strides);
// status = DftiSetValue(hand, DFTI_OUTPUT_STRIDES, spatial_strides);
// status = DftiSetValue(hand, DFTI_INPUT_DISTANCE, H*(W/2 + 1));
// status = DftiSetValue(hand, DFTI_OUTPUT_DISTANCE, H*W);
// }
// status = DftiCommitDescriptor(hand);
// // printf("commit %d\n", status);
// if (IS_FORWARD == 1) status = DftiComputeForward(hand, SPATIALS, FREQUENCIES);
// else status = DftiComputeBackward(hand, FREQUENCIES, SPATIALS);
// // printf("fft %d\n", status);
// status = DftiFreeDescriptor(&hand);
// // printf("free %d\n", status);
// }
// }
// Batched 2D complex-to-complex FFT/iFFT of N contiguous HxW maps on the
// MIC coprocessor (full spectrum; contrast with fft(), which uses real
// transforms and conjugate-even half-spectra).  IS_FORWARD == 1 transforms
// SPATIALS -> FREQUENCIES, otherwise the inverse.  Both directions are
// scaled by 1/sqrt(H*W), so the pair is unitary.
// NOTE(review): SPATIALS is declared float* but DFTI_COMPLEX transforms
// read/write complex values -- presumably callers pass interleaved complex
// data (2*H*W floats per map); confirm at call sites.
// NOTE(review): DFTI `status` returns are never checked.
void fft_full(int N, int H, int W, int IS_FORWARD, float *restrict SPATIALS, float complex *restrict FREQUENCIES){
#pragma offload target(mic:MIC_DEV) \
in(SPATIALS:length(0) REUSE) \
in(FREQUENCIES:length(0) REUSE)
{
DFTI_DESCRIPTOR_HANDLE hand; MKL_LONG status;
MKL_LONG transform_dimensions[] = {H, W};
MKL_LONG spatial_strides[] = {0, W, 1}; // spatial shape: HxW
MKL_LONG frequency_strides[] = {0, W, 1}; // full complex spectrum is also HxW (no conjugate-even packing here)
// printf("Starting.\n");
status = DftiCreateDescriptor(&hand, DFTI_SINGLE, DFTI_COMPLEX, 2, transform_dimensions);
// printf("Create %d\n", status);
status = DftiSetValue(hand, DFTI_PLACEMENT, DFTI_NOT_INPLACE);
// printf("place %d\n", status);
// unitary scaling: same 1/sqrt(H*W) factor in both directions
float scale = pow((float) H*W, -0.5);
status = DftiSetValue(hand, DFTI_FORWARD_SCALE, scale);
// printf("forward %d\n", status);
status = DftiSetValue(hand, DFTI_BACKWARD_SCALE, scale);
// printf("backward %d\n", status);
status = DftiSetValue(hand, DFTI_NUMBER_OF_TRANSFORMS, N);
// printf("num %d\n", status);
status = DftiSetValue(hand, DFTI_THREAD_LIMIT, 236);
status = DftiSetValue(hand, DFTI_COMPLEX_STORAGE, DFTI_COMPLEX_COMPLEX);
// printf("storage %d\n", status);
status = DftiSetValue(hand, DFTI_INPUT_DISTANCE, H*W);
// printf("input %d\n", status);
status = DftiSetValue(hand, DFTI_OUTPUT_DISTANCE, H*W);
// strides are set once for both directions; harmless because
// frequency_strides and spatial_strides are identical here
status = DftiSetValue(hand, DFTI_INPUT_STRIDES, frequency_strides);
status = DftiSetValue(hand, DFTI_OUTPUT_STRIDES, spatial_strides);
// printf("input %d\n", status);
status = DftiCommitDescriptor(hand);
// printf("commit %d\n", status);
if (IS_FORWARD == 1) status = DftiComputeForward(hand, SPATIALS, FREQUENCIES);
else status = DftiComputeBackward(hand, FREQUENCIES, SPATIALS);
// printf("fft %d\n", status);
status = DftiFreeDescriptor(&hand);
// printf("free %d\n", status);
}
}
// Per-frequency complex multiply-accumulate: for every (image n, filter k)
// pair, OUTPUTS[n][k][:] = sum over channels c of
// FILTERS[k][c][:] * INPUTS[n][c][:], where each [:] is a map of HW
// complex frequency coefficients.
// Layouts: INPUTS is [N][C][HW], FILTERS is [K][C][HW], OUTPUTS is [N][K][HW].
// OUTPUTS is zeroed before accumulation; one OpenMP iteration owns one
// (n, k) output map, so no write sharing occurs.
void cmult(int N, int K, int C, int HW, float complex *restrict INPUTS, float complex *restrict FILTERS, float complex *restrict OUTPUTS){
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE)
{
    int pair;
#pragma omp parallel for private(pair) shared(INPUTS, FILTERS, OUTPUTS, N, K, C, HW)
    for (pair = 0; pair < N*K; pair++){
        int image = pair/K;
        int filter = pair - image*K; // remainder of pair/K
        assert(image*K + filter == pair);
        float complex *restrict out_map = OUTPUTS + pair*HW;
        for (int f = 0; f < HW; f++)
            out_map[f] = 0.f + I*0.f;
        for (int channel = 0; channel < C; channel++){
            const float complex *restrict filter_map = FILTERS + (filter*C + channel)*HW;
            const float complex *restrict input_map = INPUTS + (image*C + channel)*HW;
            for (int f = 0; f < HW; f++)
                out_map[f] += filter_map[f] * input_map[f];
        }
    }
}
}
// Filter-gradient accumulation for cmult: for every (filter k, channel c)
// pair, GRADIENT_FILTERS[k][c][:] += sum over images n of
// GRADIENT_OUTPUTS[n][k][:] * conj(INPUTS[n][c][:]), elementwise over the
// HW complex coefficients.  GRADIENT_FILTERS is accumulated into, not
// zeroed -- callers are responsible for clearing it first.
// One OpenMP iteration owns one (k, c) gradient map, so no write sharing.
// NOTE(review): the input-gradient path (GRADIENT_INPUTS, FILTERS) is
// intentionally disabled -- the original author suspected the conjugate
// formulation is wrong there ("grad_outputs is d/dReal, d/dImag and need
// to differentiate each individually"); both parameters are accepted but
// untouched.
void cmult_gradient(int N, int K, int C, int HW, float complex *restrict INPUTS, float complex *restrict GRADIENT_INPUTS, float complex *restrict FILTERS, float complex *restrict GRADIENT_FILTERS, float complex *restrict GRADIENT_OUTPUTS){
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(GRADIENT_INPUTS:length(0) REUSE) \
in(FILTERS:length(0) REUSE) \
in(GRADIENT_FILTERS:length(0) REUSE) \
in(GRADIENT_OUTPUTS:length(0) REUSE)
{
    int kc;
#pragma omp parallel for private(kc) shared(INPUTS, GRADIENT_INPUTS, FILTERS, GRADIENT_FILTERS, GRADIENT_OUTPUTS, N, K, C, HW)
    for (kc = 0; kc < K*C; kc++){
        int filter = kc/C;
        int channel = kc - filter*C; // remainder of kc/C
        assert(filter*C + channel == kc);
        float complex *restrict d_filter_map = GRADIENT_FILTERS + kc*HW;
        for (int image = 0; image < N; image++){
            const float complex *restrict d_out_map = GRADIENT_OUTPUTS + (image*K + filter)*HW;
            const float complex *restrict in_map = INPUTS + (image*C + channel)*HW;
            for (int f = 0; f < HW; f++)
                d_filter_map[f] += d_out_map[f] * conjf(in_map[f]);
        }
    }
}
}
/* Crop N half-spectra of shape H x W down to H_out x W_freq, where
 * W_freq = W_out/2 + 1: the top H_out/2 rows are taken from the top of the
 * input, the bottom rows from the bottom of the input (negative frequencies).
 * NOTE(review): the top rows copy columns [0, W_freq-1) and the bottom rows
 * copy [1, W_freq-1) — the last (Nyquist) column is never copied and the
 * bottom rows also skip column 0; preserved as-is, confirm this is intended. */
void low_pass_filter(int N, int H, int W, int H_out, int W_out, float complex *restrict INPUTS, float complex *restrict OUTPUTS){
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE) \
in(OUTPUTS:length(0) REUSE)
{
    int n;
    int W_freq = W_out/2 + 1;
    #pragma omp parallel for private(n) shared(INPUTS, OUTPUTS, N, H, W, H_out, W_out, W_freq)
    for (n = 0; n < N; n++){
        /* positive-frequency rows: straight copy from the top of the input */
        for (int h = 0; h < H_out/2; h++){
            int dst = ti3(n, h, 0, H_out, W_freq);
            int src = ti3(n, h, 0, H, W);
            for (int w = 0; w < W_freq - 1; w++)
                OUTPUTS[dst + w] = INPUTS[src + w];
        }
        /* negative-frequency rows: taken from the tail of the input */
        for (int h = H_out/2 + 1; h < H_out; h++){
            int dst = ti3(n, h, 1, H_out, W_freq);
            int src = ti3(n, H - (H_out - h), 1, H, W);
            for (int w = 0; w < W_freq - 2; w++)
                OUTPUTS[dst + w] = INPUTS[src + w];
        }
    }
}
}
/* Backward pass of low_pass_filter: scatter the cropped-spectrum gradient
 * back into the full-size input-gradient buffer, writing exactly the entries
 * that the forward pass read (same row/column ranges, same skipped columns). */
void low_pass_filter_gradient(int N, int H, int W, int H_out, int W_out, float complex *restrict GRADIENT_INPUTS, float complex *restrict GRADIENT_OUTPUTS){
#pragma offload target(mic:MIC_DEV) \
in(GRADIENT_INPUTS:length(0) REUSE) \
in(GRADIENT_OUTPUTS:length(0) REUSE)
{
    int n;
    int W_freq = W_out/2 + 1;
    #pragma omp parallel for private(n) shared(GRADIENT_INPUTS, GRADIENT_OUTPUTS, N, H, W, H_out, W_out, W_freq)
    for (n = 0; n < N; n++){
        /* positive-frequency rows */
        for (int h = 0; h < H_out/2; h++){
            int dst = ti3(n, h, 0, H, W);
            int src = ti3(n, h, 0, H_out, W_freq);
            for (int w = 0; w < W_freq - 1; w++)
                GRADIENT_INPUTS[dst + w] = GRADIENT_OUTPUTS[src + w];
        }
        /* negative-frequency rows map back to the tail of the input */
        for (int h = H_out/2 + 1; h < H_out; h++){
            int dst = ti3(n, H - (H_out - h), 1, H, W);
            int src = ti3(n, h, 1, H_out, W_freq);
            for (int w = 0; w < W_freq - 2; w++)
                GRADIENT_INPUTS[dst + w] = GRADIENT_OUTPUTS[src + w];
        }
    }
}
}
/* For N half-spectra of shape H x W, zero the entries in the first and last
 * columns that are redundant under conjugate symmetry (rows H/2+1 .. H-1),
 * and force the four self-conjugate bins (h in {0, H/2}, w in {0, W-1}) to be
 * purely real by subtracting their imaginary part.
 * NOTE(review): assumes H is even and W is the rfft half-width — confirm. */
void wipe_out_irrelevant_entries(int N, int H, int W, float complex *restrict INPUTS){
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE)
{
    int n;
    #pragma omp parallel for private(n) shared(INPUTS, N, H, W)
    for (n = 0; n < N; n++){
        /* the two edge columns hold the symmetry-constrained entries */
        int edge_cols[2] = {W - 1, 0};
        for (int ci = 0; ci < 2; ci++){
            int w = edge_cols[ci];
            /* lower half mirrors the upper half: zero it out */
            for (int h = H/2 + 1; h < H; h++)
                INPUTS[ti3(n, h, w, H, W)] = 0.f + 0.f*I;
            /* self-conjugate bins must be real: strip the imaginary part */
            INPUTS[ti3(n, 0, w, H, W)] -= I*cimagf(INPUTS[ti3(n, 0, w, H, W)]);
            INPUTS[ti3(n, H/2, w, H, W)] -= I*cimagf(INPUTS[ti3(n, H/2, w, H, W)]);
        }
    }
}
}
/* Full-spectrum variant of wipe_out_irrelevant_entries: zero the entire right
 * half of each H x W spectrum (columns W/2+1 .. W-1, all rows), zero the
 * redundant lower halves of columns 0 and W/2 (rows H/2+1 .. H-1), and make
 * the four self-conjugate bins (h in {0, H/2}, w in {0, W/2}) purely real.
 * NOTE(review): assumes H and W are even — confirm with callers. */
void wipe_out_irrelevant_entries_full(int N, int H, int W, float complex *restrict INPUTS){
#pragma offload target(mic:MIC_DEV) \
in(INPUTS:length(0) REUSE)
{
    int n;
    #pragma omp parallel for private(n) shared(INPUTS, N, H, W)
    for (n = 0; n < N; n++){
        /* right half of the spectrum is the conjugate mirror of the left */
        for (int h = 0; h < H; h++)
            for (int w = W/2 + 1; w < W; w++)
                INPUTS[ti3(n, h, w, H, W)] = 0.f + 0.f*I;
        /* columns 0 and W/2: lower halves mirror the upper halves */
        int sym_cols[2] = {W/2, 0};
        for (int ci = 0; ci < 2; ci++){
            int w = sym_cols[ci];
            for (int h = H/2 + 1; h < H; h++)
                INPUTS[ti3(n, h, w, H, W)] = 0.f + 0.f*I;
            /* self-conjugate bins must be real */
            INPUTS[ti3(n, 0, w, H, W)] -= I*cimagf(INPUTS[ti3(n, 0, w, H, W)]);
            INPUTS[ti3(n, H/2, w, H, W)] -= I*cimagf(INPUTS[ti3(n, H/2, w, H, W)]);
        }
    }
}
}
/* Rescale a gradient stored as an N*K batch of H x W_HALF half-spectra to
 * account for rfft conjugate symmetry: every retained bin implicitly stands
 * for itself plus its conjugate twin, so each entry is multiplied by CONST
 * (2.0 when FORWARD != 0, 0.5 when FORWARD == 0).  The four self-conjugate
 * corner bins (h in {0, H/2}, w in {0, W_HALF-1}) have no twin, so their
 * scaling is undone afterwards by multiplying with 1.0/CONST.
 * NOTE(review): SCRATCH is not touched by the active code — it is only used
 * by the commented-out variants below.  Assumes H is even — TODO confirm. */
void fft_conjugate_symmetry_scaling(int N, int K, int H, int FORWARD, int W_SPATIAL, float complex *restrict GRADIENT, float complex *restrict SCRATCH){
#pragma offload target(mic:MIC_DEV) \
in(GRADIENT:length(0) REUSE) \
in(SCRATCH:length(0) REUSE)
{
int W_HALF = W_SPATIAL/2 + 1;
int IS_EVEN = W_SPATIAL/2 - (W_SPATIAL - 1)/2; // 1 if W_SPATIAL is even, 0 if odd (only used by the commented-out code below)
int nk, h, w;
float CONST = 2.0; // forward: double each bin to stand in for its conjugate twin
if (FORWARD == 0) CONST = 0.5; // backward: halve instead
// SCRATCH[0 : N*K*H*W_HALF] = GRADIENT[0 : N*K*H*W_HALF];
#pragma omp parallel for private(nk, h, w) shared(GRADIENT, N, K, H, W_SPATIAL, W_HALF, CONST, SCRATCH)
for (nk = 0; nk < N*K; nk++){
int n = nk/K;
int k = md(nk, K);
assert(n*K + k == nk);
for (h = 0; h < H; h++){
for (w = 0; w < W_HALF; w++){ // scan over all elements that have been removed due to conjugate symmetry
// GRADIENT[ti(n, k, h, w, K, H, W_HALF)] += conjf(SCRATCH[ti(n, k, h, w, K, H, W_HALF)]);
// GRADIENT[ti(n, k, h, w, K, H, W_HALF)] *= (double complex) 2.f + 0.f*I;
GRADIENT[ti(n, k, h, w, K, H, W_HALF)] *= CONST;
// GRADIENT[ti(n, k, h, w, K, H, W_HALF)] += -2.*I*cimagf(GRADIENT[ti(n, k, h, w, K, H, W_HALF)]);
// GRADIENT[ti(n, k, h, w, K, H, W_HALF)] += -I*crealf(GRADIENT[ti(n, k, h, w, K, H, W_HALF)]);
}
// for (w = 0; w < W_SPATIAL - W_HALF; w++){ // scan over all elements that have been removed due to conjugate symmetry
// GRADIENT[ti(n, k, h, W_HALF - 1 - w - IS_EVEN, K, H, W_HALF)] += conjf(SCRATCH[ti(n, k, md(H-h, H), W_HALF - 1 - w, K, H, W_HALF)]);
// }
}
// undo the scaling on the four self-conjugate corner bins (no mirror twin)
GRADIENT[ti(n, k, 0, 0, K, H, W_HALF)] *= 1.0/CONST;
GRADIENT[ti(n, k, H/2, 0, K, H, W_HALF)] *= 1.0/CONST;
GRADIENT[ti(n, k, 0, W_HALF-1, K, H, W_HALF)] *= 1.0/CONST;
GRADIENT[ti(n, k, H/2, W_HALF-1, K, H, W_HALF)] *= 1.0/CONST;
// int w = W_HALF-1;
// for (int h = H/2+1; h < H; h++) INPUTS[ti(n, k, h, w, K, H, W_HALF)] *= 2.;
// w = 0;
// for (int h = H/2+1; h < H; h++) INPUTS[ti(n, k, h, w, K, H, W_HALF)] *= 2.;
// for k2=N2/2+1…N2-1: Z{k1,k2,m} = conj(AZ[os[0]+(N1-k1)%N1*os[1]+(N2-k2)%N2*os[2]+m*odist])
}
}
}
/* Complex-conjugate N interleaved complex values in place: A holds 2*N floats
 * laid out as re,im pairs, and every imaginary component (odd index) is
 * negated. */
void conjugate(int N, float *restrict A){
#pragma offload target(mic:MIC_DEV) \
in(A:length(0) REUSE)
{
    for (int i = 0; i < N; i++)
        A[2*i + 1] *= -1.f;
}
}
/* De-interleave N complex values: A holds 2*N floats as re,im pairs; the real
 * parts (even indices) go to REAL[0..N-1] and the imaginary parts (odd
 * indices) to IMAGINARY[0..N-1]. */
void real_and_imaginary(int N, float *restrict A, float *restrict REAL, float *restrict IMAGINARY){
#pragma offload target(mic:MIC_DEV) \
in(A:length(0) REUSE) \
in(REAL:length(0) REUSE) \
in(IMAGINARY:length(0) REUSE)
{
    for (int i = 0; i < N; i++){
        REAL[i]      = A[2*i];
        IMAGINARY[i] = A[2*i + 1];
    }
}
}
void amplitude_and_phase(int N, float complex *restrict A, float *restrict AMPLITUDE, float *restrict PHASE){
#pragma offload target(mic:MIC_DEV) \
in(A:length(0) REUSE) \
in(AMPLITUDE:length(0) REUSE) \
in(PHASE:length(0) REUSE)
{
// int n;
// #pragma omp parallel for private(n) shared(AMPLITUDE, PHASE, A)
// for (n = 0; n < N; n++){
// AMPLITUDE[n] = cabsf(A[n]);
// PHASE[n] = cargf(A[n]);
// }
AMPLITUDE[0:N] = crealf(A[0:N])*crealf(A[0:N]) + cimagf(A[0:N])*cimagf(A[0:N]);
PHASE[0:N] = cargf(A[0:N]);
}
} |
adjoint_stacks.c | uint *a1_STACKc = NULL; ulong a1_STACKc_c = 0;
int *a1_STACKi = NULL; ulong a1_STACKi_c = 0;
double *a1_STACKf = NULL; ulong a1_STACKf_c = 0;
#pragma omp threadprivate (a1_STACKc, a1_STACKc_c, a1_STACKi, a1_STACKi_c, a1_STACKf, a1_STACKf_c)
|
no_omp_cpu.c | /*
* Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <assert.h>
/* Build the set of CPUs NOT occupied by the OpenMP runtime: mark all CPUs in
 * [0, num_cpu) as free, then let every OpenMP thread clear the CPU it reports
 * running on.  NOTE(review): sched_getcpu() is sampled once per thread inside
 * the critical section, so the result is a snapshot — threads may migrate
 * afterwards. */
void no_omp_cpu(int num_cpu, cpu_set_t *no_omp)
{
	int cpu_index, i;
	/* initially every CPU is considered free */
	for (i = 0; i < num_cpu; ++i)
		CPU_SET(i, no_omp);
#pragma omp parallel default(shared)
	{
#pragma omp critical
		{
			/* cpu_index is shared across threads, so the query and
			 * the CPU_CLR must run atomically per thread */
			cpu_index = sched_getcpu();
			assert(cpu_index < num_cpu);
			CPU_CLR(cpu_index, no_omp);
		} /* end pragma omp critical */
	} /* end pragma omp parallel */
}
/* Print the list of CPUs left free after the OpenMP runtime has placed its
 * threads, then release the dynamically allocated CPU set. */
int main(int argc, char **argv)
{
	int num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
	cpu_set_t *no_omp = CPU_ALLOC(num_cpu);
	no_omp_cpu(num_cpu, no_omp);
	printf("Free cpu list: ");
	for (int i = 0; i < num_cpu; ++i)
		if (CPU_ISSET(i, no_omp))
			printf("%i ", i);
	printf("\n\n");
	CPU_FREE(no_omp);
	return 0;
}
|
_hypre_utilities.h |
/*** DO NOT EDIT THIS FILE DIRECTLY (use 'headers' to generate) ***/
#ifndef hypre_UTILITIES_HEADER
#define hypre_UTILITIES_HEADER
#include "HYPRE_utilities.h"
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* General structures and values
*
*****************************************************************************/
#ifndef hypre_GENERAL_HEADER
#define hypre_GENERAL_HEADER
/* This allows us to consistently avoid 'int' throughout hypre */
typedef int hypre_int;
typedef long int hypre_longint;
typedef unsigned int hypre_uint;
typedef unsigned long int hypre_ulongint;
typedef unsigned long long int hypre_ulonglongint;
/* This allows us to consistently avoid 'double' throughout hypre */
typedef double hypre_double;
/*--------------------------------------------------------------------------
* Define various functions
*--------------------------------------------------------------------------*/
#ifndef hypre_max
#define hypre_max(a,b) (((a)<(b)) ? (b) : (a))
#endif
#ifndef hypre_min
#define hypre_min(a,b) (((a)<(b)) ? (a) : (b))
#endif
#ifndef hypre_abs
#define hypre_abs(a) (((a)>0) ? (a) : -(a))
#endif
#ifndef hypre_round
#define hypre_round(x) ( ((x) < 0.0) ? ((HYPRE_Int)(x - 0.5)) : ((HYPRE_Int)(x + 0.5)) )
#endif
#ifndef hypre_pow2
#define hypre_pow2(i) ( 1 << (i) )
#endif
#endif /* hypre_GENERAL_HEADER */
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef hypre_PRINTF_HEADER
#define hypre_PRINTF_HEADER
#include <stdio.h>
/* printf.c */
// #ifdef HYPRE_BIGINT
HYPRE_Int hypre_ndigits( HYPRE_BigInt number );
HYPRE_Int hypre_printf( const char *format, ... );
HYPRE_Int hypre_fprintf( FILE *stream, const char *format, ... );
HYPRE_Int hypre_sprintf( char *s, const char *format, ... );
HYPRE_Int hypre_scanf( const char *format, ... );
HYPRE_Int hypre_fscanf( FILE *stream, const char *format, ... );
HYPRE_Int hypre_sscanf( char *s, const char *format, ... );
// #else
// #define hypre_printf printf
// #define hypre_fprintf fprintf
// #define hypre_sprintf sprintf
// #define hypre_scanf scanf
// #define hypre_fscanf fscanf
// #define hypre_sscanf sscanf
// #endif
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef hypre_ERROR_HEADER
#define hypre_ERROR_HEADER
#include <assert.h>
/*--------------------------------------------------------------------------
* Global variable used in hypre error checking
*--------------------------------------------------------------------------*/
extern HYPRE_Int hypre__global_error;
#define hypre_error_flag hypre__global_error
/*--------------------------------------------------------------------------
* HYPRE error macros
*--------------------------------------------------------------------------*/
void hypre_error_handler(const char *filename, HYPRE_Int line, HYPRE_Int ierr, const char *msg);
#define hypre_error(IERR) hypre_error_handler(__FILE__, __LINE__, IERR, NULL)
#define hypre_error_w_msg(IERR, msg) hypre_error_handler(__FILE__, __LINE__, IERR, msg)
#define hypre_error_in_arg(IARG) hypre_error(HYPRE_ERROR_ARG | IARG<<3)
#if defined(HYPRE_DEBUG)
/* host assert */
#define hypre_assert(EX) do { if (!(EX)) { fprintf(stderr, "[%s, %d] hypre_assert failed: %s\n", __FILE__, __LINE__, #EX); hypre_error(1); assert(0); } } while (0)
/* device assert */
#if defined(HYPRE_USING_CUDA)
#define hypre_device_assert(EX) assert(EX)
#elif defined(HYPRE_USING_HIP)
/* FIXME: Currently, asserts in device kernels in HIP do not behave well */
#define hypre_device_assert(EX)
#endif
#else /* #ifdef HYPRE_DEBUG */
/* this is to silence compiler's unused variable warnings */
#ifdef __cplusplus
#define hypre_assert(EX) do { if (0) { static_cast<void> (EX); } } while (0)
#else
#define hypre_assert(EX) do { if (0) { (void) (EX); } } while (0)
#endif
#define hypre_device_assert(EX)
#endif
#endif /* hypre_ERROR_HEADER */
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Fake mpi stubs to generate serial codes without mpi
*
*****************************************************************************/
#ifndef hypre_MPISTUBS
#define hypre_MPISTUBS
#ifdef __cplusplus
extern "C" {
#endif
#ifdef HYPRE_SEQUENTIAL
/******************************************************************************
* MPI stubs to generate serial codes without mpi
*****************************************************************************/
/*--------------------------------------------------------------------------
* Change all MPI names to hypre_MPI names to avoid link conflicts.
*
* NOTE: MPI_Comm is the only MPI symbol in the HYPRE user interface,
* and is defined in `HYPRE_utilities.h'.
*--------------------------------------------------------------------------*/
#define MPI_Comm hypre_MPI_Comm
#define MPI_Group hypre_MPI_Group
#define MPI_Request hypre_MPI_Request
#define MPI_Datatype hypre_MPI_Datatype
#define MPI_Status hypre_MPI_Status
#define MPI_Op hypre_MPI_Op
#define MPI_Aint hypre_MPI_Aint
#define MPI_Info hypre_MPI_Info
#define MPI_COMM_WORLD hypre_MPI_COMM_WORLD
#define MPI_COMM_NULL hypre_MPI_COMM_NULL
#define MPI_COMM_SELF hypre_MPI_COMM_SELF
#define MPI_COMM_TYPE_SHARED hypre_MPI_COMM_TYPE_SHARED
#define MPI_BOTTOM hypre_MPI_BOTTOM
#define MPI_FLOAT hypre_MPI_FLOAT
#define MPI_DOUBLE hypre_MPI_DOUBLE
#define MPI_LONG_DOUBLE hypre_MPI_LONG_DOUBLE
#define MPI_INT hypre_MPI_INT
#define MPI_LONG_LONG_INT hypre_MPI_LONG_LONG_INT
#define MPI_CHAR hypre_MPI_CHAR
#define MPI_LONG hypre_MPI_LONG
#define MPI_BYTE hypre_MPI_BYTE
#define MPI_C_DOUBLE_COMPLEX hypre_MPI_COMPLEX
#define MPI_SUM hypre_MPI_SUM
#define MPI_MIN hypre_MPI_MIN
#define MPI_MAX hypre_MPI_MAX
#define MPI_LOR hypre_MPI_LOR
#define MPI_LAND hypre_MPI_LAND
#define MPI_SUCCESS hypre_MPI_SUCCESS
#define MPI_STATUSES_IGNORE hypre_MPI_STATUSES_IGNORE
#define MPI_UNDEFINED hypre_MPI_UNDEFINED
#define MPI_REQUEST_NULL hypre_MPI_REQUEST_NULL
#define MPI_INFO_NULL hypre_MPI_INFO_NULL
#define MPI_ANY_SOURCE hypre_MPI_ANY_SOURCE
#define MPI_ANY_TAG hypre_MPI_ANY_TAG
#define MPI_SOURCE hypre_MPI_SOURCE
#define MPI_TAG hypre_MPI_TAG
#define MPI_Init hypre_MPI_Init
#define MPI_Finalize hypre_MPI_Finalize
#define MPI_Abort hypre_MPI_Abort
#define MPI_Wtime hypre_MPI_Wtime
#define MPI_Wtick hypre_MPI_Wtick
#define MPI_Barrier hypre_MPI_Barrier
#define MPI_Comm_create hypre_MPI_Comm_create
#define MPI_Comm_dup hypre_MPI_Comm_dup
#define MPI_Comm_f2c hypre_MPI_Comm_f2c
#define MPI_Comm_group hypre_MPI_Comm_group
#define MPI_Comm_size hypre_MPI_Comm_size
#define MPI_Comm_rank hypre_MPI_Comm_rank
#define MPI_Comm_free hypre_MPI_Comm_free
#define MPI_Comm_split hypre_MPI_Comm_split
#define MPI_Comm_split_type hypre_MPI_Comm_split_type
#define MPI_Group_incl hypre_MPI_Group_incl
#define MPI_Group_free hypre_MPI_Group_free
#define MPI_Address hypre_MPI_Address
#define MPI_Get_count hypre_MPI_Get_count
#define MPI_Alltoall hypre_MPI_Alltoall
#define MPI_Allgather hypre_MPI_Allgather
#define MPI_Allgatherv hypre_MPI_Allgatherv
#define MPI_Gather hypre_MPI_Gather
#define MPI_Gatherv hypre_MPI_Gatherv
#define MPI_Scatter hypre_MPI_Scatter
#define MPI_Scatterv hypre_MPI_Scatterv
#define MPI_Bcast hypre_MPI_Bcast
#define MPI_Send hypre_MPI_Send
#define MPI_Recv hypre_MPI_Recv
#define MPI_Isend hypre_MPI_Isend
#define MPI_Irecv hypre_MPI_Irecv
#define MPI_Send_init hypre_MPI_Send_init
#define MPI_Recv_init hypre_MPI_Recv_init
#define MPI_Irsend hypre_MPI_Irsend
#define MPI_Startall hypre_MPI_Startall
#define MPI_Probe hypre_MPI_Probe
#define MPI_Iprobe hypre_MPI_Iprobe
#define MPI_Test hypre_MPI_Test
#define MPI_Testall hypre_MPI_Testall
#define MPI_Wait hypre_MPI_Wait
#define MPI_Waitall hypre_MPI_Waitall
#define MPI_Waitany hypre_MPI_Waitany
#define MPI_Allreduce hypre_MPI_Allreduce
#define MPI_Reduce hypre_MPI_Reduce
#define MPI_Scan hypre_MPI_Scan
#define MPI_Request_free hypre_MPI_Request_free
#define MPI_Type_contiguous hypre_MPI_Type_contiguous
#define MPI_Type_vector hypre_MPI_Type_vector
#define MPI_Type_hvector hypre_MPI_Type_hvector
#define MPI_Type_struct hypre_MPI_Type_struct
#define MPI_Type_commit hypre_MPI_Type_commit
#define MPI_Type_free hypre_MPI_Type_free
#define MPI_Op_free hypre_MPI_Op_free
#define MPI_Op_create hypre_MPI_Op_create
#define MPI_User_function hypre_MPI_User_function
#define MPI_Info_create hypre_MPI_Info_create
/*--------------------------------------------------------------------------
* Types, etc.
*--------------------------------------------------------------------------*/
/* These types have associated creation and destruction routines */
typedef HYPRE_Int hypre_MPI_Comm;
typedef HYPRE_Int hypre_MPI_Group;
typedef HYPRE_Int hypre_MPI_Request;
typedef HYPRE_Int hypre_MPI_Datatype;
typedef void (hypre_MPI_User_function) ();
typedef struct
{
HYPRE_Int hypre_MPI_SOURCE;
HYPRE_Int hypre_MPI_TAG;
} hypre_MPI_Status;
typedef HYPRE_Int hypre_MPI_Op;
typedef HYPRE_Int hypre_MPI_Aint;
typedef HYPRE_Int hypre_MPI_Info;
#define hypre_MPI_COMM_SELF 1
#define hypre_MPI_COMM_WORLD 0
#define hypre_MPI_COMM_NULL -1
#define hypre_MPI_COMM_TYPE_SHARED 0
#define hypre_MPI_BOTTOM 0x0
#define hypre_MPI_FLOAT 0
#define hypre_MPI_DOUBLE 1
#define hypre_MPI_LONG_DOUBLE 2
#define hypre_MPI_INT 3
#define hypre_MPI_CHAR 4
#define hypre_MPI_LONG 5
#define hypre_MPI_BYTE 6
#define hypre_MPI_REAL 7
#define hypre_MPI_COMPLEX 8
#define hypre_MPI_LONG_LONG_INT 9
#define hypre_MPI_SUM 0
#define hypre_MPI_MIN 1
#define hypre_MPI_MAX 2
#define hypre_MPI_LOR 3
#define hypre_MPI_LAND 4
#define hypre_MPI_SUCCESS 0
#define hypre_MPI_STATUSES_IGNORE 0
#define hypre_MPI_UNDEFINED -9999
#define hypre_MPI_REQUEST_NULL 0
#define hypre_MPI_INFO_NULL 0
#define hypre_MPI_ANY_SOURCE 1
#define hypre_MPI_ANY_TAG 1
#else
/******************************************************************************
* MPI stubs to do casting of HYPRE_Int and hypre_int correctly
*****************************************************************************/
typedef MPI_Comm hypre_MPI_Comm;
typedef MPI_Group hypre_MPI_Group;
typedef MPI_Request hypre_MPI_Request;
typedef MPI_Datatype hypre_MPI_Datatype;
typedef MPI_Status hypre_MPI_Status;
typedef MPI_Op hypre_MPI_Op;
typedef MPI_Aint hypre_MPI_Aint;
typedef MPI_Info hypre_MPI_Info;
typedef MPI_User_function hypre_MPI_User_function;
#define hypre_MPI_COMM_WORLD MPI_COMM_WORLD
#define hypre_MPI_COMM_NULL MPI_COMM_NULL
#define hypre_MPI_BOTTOM MPI_BOTTOM
#define hypre_MPI_COMM_SELF MPI_COMM_SELF
#define hypre_MPI_COMM_TYPE_SHARED MPI_COMM_TYPE_SHARED
#define hypre_MPI_FLOAT MPI_FLOAT
#define hypre_MPI_DOUBLE MPI_DOUBLE
#define hypre_MPI_LONG_DOUBLE MPI_LONG_DOUBLE
/* HYPRE_MPI_INT is defined in HYPRE_utilities.h */
#define hypre_MPI_INT HYPRE_MPI_INT
#define hypre_MPI_CHAR MPI_CHAR
#define hypre_MPI_LONG MPI_LONG
#define hypre_MPI_BYTE MPI_BYTE
/* HYPRE_MPI_REAL is defined in HYPRE_utilities.h */
#define hypre_MPI_REAL HYPRE_MPI_REAL
/* HYPRE_MPI_COMPLEX is defined in HYPRE_utilities.h */
#define hypre_MPI_COMPLEX HYPRE_MPI_COMPLEX
#define hypre_MPI_SUM MPI_SUM
#define hypre_MPI_MIN MPI_MIN
#define hypre_MPI_MAX MPI_MAX
#define hypre_MPI_LOR MPI_LOR
#define hypre_MPI_SUCCESS MPI_SUCCESS
#define hypre_MPI_STATUSES_IGNORE MPI_STATUSES_IGNORE
#define hypre_MPI_UNDEFINED MPI_UNDEFINED
#define hypre_MPI_REQUEST_NULL MPI_REQUEST_NULL
#define hypre_MPI_INFO_NULL MPI_INFO_NULL
#define hypre_MPI_ANY_SOURCE MPI_ANY_SOURCE
#define hypre_MPI_ANY_TAG MPI_ANY_TAG
#define hypre_MPI_SOURCE MPI_SOURCE
#define hypre_MPI_TAG MPI_TAG
#define hypre_MPI_LAND MPI_LAND
#endif
/******************************************************************************
* Everything below this applies to both ifdef cases above
*****************************************************************************/
/*--------------------------------------------------------------------------
* Prototypes
*--------------------------------------------------------------------------*/
/* mpistubs.c */
HYPRE_Int hypre_MPI_Init( hypre_int *argc, char ***argv );
HYPRE_Int hypre_MPI_Finalize( void );
HYPRE_Int hypre_MPI_Abort( hypre_MPI_Comm comm, HYPRE_Int errorcode );
HYPRE_Real hypre_MPI_Wtime( void );
HYPRE_Real hypre_MPI_Wtick( void );
HYPRE_Int hypre_MPI_Barrier( hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Comm_create( hypre_MPI_Comm comm, hypre_MPI_Group group,
hypre_MPI_Comm *newcomm );
HYPRE_Int hypre_MPI_Comm_dup( hypre_MPI_Comm comm, hypre_MPI_Comm *newcomm );
hypre_MPI_Comm hypre_MPI_Comm_f2c( hypre_int comm );
HYPRE_Int hypre_MPI_Comm_size( hypre_MPI_Comm comm, HYPRE_Int *size );
HYPRE_Int hypre_MPI_Comm_rank( hypre_MPI_Comm comm, HYPRE_Int *rank );
HYPRE_Int hypre_MPI_Comm_free( hypre_MPI_Comm *comm );
HYPRE_Int hypre_MPI_Comm_group( hypre_MPI_Comm comm, hypre_MPI_Group *group );
HYPRE_Int hypre_MPI_Comm_split( hypre_MPI_Comm comm, HYPRE_Int n, HYPRE_Int m,
hypre_MPI_Comm * comms );
HYPRE_Int hypre_MPI_Group_incl( hypre_MPI_Group group, HYPRE_Int n, HYPRE_Int *ranks,
hypre_MPI_Group *newgroup );
HYPRE_Int hypre_MPI_Group_free( hypre_MPI_Group *group );
HYPRE_Int hypre_MPI_Address( void *location, hypre_MPI_Aint *address );
HYPRE_Int hypre_MPI_Get_count( hypre_MPI_Status *status, hypre_MPI_Datatype datatype,
HYPRE_Int *count );
HYPRE_Int hypre_MPI_Alltoall( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype,
void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Allgather( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype,
void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Allgatherv( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype,
void *recvbuf, HYPRE_Int *recvcounts, HYPRE_Int *displs, hypre_MPI_Datatype recvtype,
hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Gather( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype,
void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, HYPRE_Int root,
hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Gatherv( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype,
void *recvbuf, HYPRE_Int *recvcounts, HYPRE_Int *displs, hypre_MPI_Datatype recvtype,
HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Scatter( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype,
void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, HYPRE_Int root,
hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Scatterv( void *sendbuf, HYPRE_Int *sendcounts, HYPRE_Int *displs,
hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype,
HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Bcast( void *buffer, HYPRE_Int count, hypre_MPI_Datatype datatype,
HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Send( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest,
HYPRE_Int tag, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Recv( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int source,
HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Isend( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest,
HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Irecv( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype,
HYPRE_Int source, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Send_init( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype,
HYPRE_Int dest, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Recv_init( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype,
HYPRE_Int dest, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Irsend( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest,
HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Startall( HYPRE_Int count, hypre_MPI_Request *array_of_requests );
HYPRE_Int hypre_MPI_Probe( HYPRE_Int source, HYPRE_Int tag, hypre_MPI_Comm comm,
hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Iprobe( HYPRE_Int source, HYPRE_Int tag, hypre_MPI_Comm comm, HYPRE_Int *flag,
hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Test( hypre_MPI_Request *request, HYPRE_Int *flag, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Testall( HYPRE_Int count, hypre_MPI_Request *array_of_requests, HYPRE_Int *flag,
hypre_MPI_Status *array_of_statuses );
HYPRE_Int hypre_MPI_Wait( hypre_MPI_Request *request, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Waitall( HYPRE_Int count, hypre_MPI_Request *array_of_requests,
hypre_MPI_Status *array_of_statuses );
HYPRE_Int hypre_MPI_Waitany( HYPRE_Int count, hypre_MPI_Request *array_of_requests,
HYPRE_Int *index, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Allreduce( void *sendbuf, void *recvbuf, HYPRE_Int count,
hypre_MPI_Datatype datatype, hypre_MPI_Op op, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Reduce( void *sendbuf, void *recvbuf, HYPRE_Int count,
hypre_MPI_Datatype datatype, hypre_MPI_Op op, HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Scan( void *sendbuf, void *recvbuf, HYPRE_Int count,
hypre_MPI_Datatype datatype, hypre_MPI_Op op, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Request_free( hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Type_contiguous( HYPRE_Int count, hypre_MPI_Datatype oldtype,
hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_vector( HYPRE_Int count, HYPRE_Int blocklength, HYPRE_Int stride,
hypre_MPI_Datatype oldtype, hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_hvector( HYPRE_Int count, HYPRE_Int blocklength, hypre_MPI_Aint stride,
hypre_MPI_Datatype oldtype, hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_struct( HYPRE_Int count, HYPRE_Int *array_of_blocklengths,
hypre_MPI_Aint *array_of_displacements, hypre_MPI_Datatype *array_of_types,
hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_commit( hypre_MPI_Datatype *datatype );
HYPRE_Int hypre_MPI_Type_free( hypre_MPI_Datatype *datatype );
HYPRE_Int hypre_MPI_Op_free( hypre_MPI_Op *op );
HYPRE_Int hypre_MPI_Op_create( hypre_MPI_User_function *function, hypre_int commute,
hypre_MPI_Op *op );
#if defined(HYPRE_USING_GPU)
HYPRE_Int hypre_MPI_Comm_split_type(hypre_MPI_Comm comm, HYPRE_Int split_type, HYPRE_Int key,
hypre_MPI_Info info, hypre_MPI_Comm *newcomm);
HYPRE_Int hypre_MPI_Info_create(hypre_MPI_Info *info);
HYPRE_Int hypre_MPI_Info_free( hypre_MPI_Info *info );
#endif
#ifdef __cplusplus
}
#endif
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef HYPRE_SMP_HEADER
#define HYPRE_SMP_HEADER
#endif
#define HYPRE_SMP_SCHEDULE schedule(static)
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header file for memory management utilities
*
* The abstract memory model has a Host (think CPU) and a Device (think GPU) and
* three basic types of memory management utilities:
*
* 1. Malloc(..., location)
* location=LOCATION_DEVICE - malloc memory on the device
* location=LOCATION_HOST - malloc memory on the host
* 2. MemCopy(..., method)
* method=HOST_TO_DEVICE - copy from host to device
* method=DEVICE_TO_HOST - copy from device to host
* method=DEVICE_TO_DEVICE - copy from device to device
* 3. SetExecutionMode
* location=LOCATION_DEVICE - execute on the device
* location=LOCATION_HOST - execute on the host
*
* Although the abstract model does not explicitly reflect a managed memory
* model (i.e., unified memory), it can support it. Here is a summary of how
* the abstract model would be mapped to specific hardware scenarios:
*
* Not using a device, not using managed memory
* Malloc(..., location)
* location=LOCATION_DEVICE - host malloc e.g., malloc
* location=LOCATION_HOST - host malloc e.g., malloc
* MemoryCopy(..., locTo,locFrom)
* locTo=LOCATION_HOST, locFrom=LOCATION_DEVICE - copy from host to host e.g., memcpy
* locTo=LOCATION_DEVICE, locFrom=LOCATION_HOST - copy from host to host e.g., memcpy
* locTo=LOCATION_DEVICE, locFrom=LOCATION_DEVICE - copy from host to host e.g., memcpy
* SetExecutionMode
* location=LOCATION_DEVICE - execute on the host
* location=LOCATION_HOST - execute on the host
*
* Using a device, not using managed memory
* Malloc(..., location)
* location=LOCATION_DEVICE - device malloc e.g., cudaMalloc
* location=LOCATION_HOST - host malloc e.g., malloc
* MemoryCopy(..., locTo,locFrom)
* locTo=LOCATION_HOST, locFrom=LOCATION_DEVICE - copy from device to host e.g., cudaMemcpy
* locTo=LOCATION_DEVICE, locFrom=LOCATION_HOST - copy from host to device e.g., cudaMemcpy
* locTo=LOCATION_DEVICE, locFrom=LOCATION_DEVICE - copy from device to device e.g., cudaMemcpy
* SetExecutionMode
* location=LOCATION_DEVICE - execute on the device
* location=LOCATION_HOST - execute on the host
*
* Using a device, using managed memory
* Malloc(..., location)
* location=LOCATION_DEVICE - managed malloc e.g., cudaMallocManaged
* location=LOCATION_HOST - host malloc e.g., malloc
* MemoryCopy(..., locTo,locFrom)
* locTo=LOCATION_HOST, locFrom=LOCATION_DEVICE - copy from device to host e.g., cudaMallocManaged
* locTo=LOCATION_DEVICE, locFrom=LOCATION_HOST - copy from host to device e.g., cudaMallocManaged
* locTo=LOCATION_DEVICE, locFrom=LOCATION_DEVICE - copy from device to device e.g., cudaMallocManaged
* SetExecutionMode
* location=LOCATION_DEVICE - execute on the device
* location=LOCATION_HOST - execute on the host
*
*****************************************************************************/
#ifndef hypre_MEMORY_HEADER
#define hypre_MEMORY_HEADER
#include <stdio.h>
#include <stdlib.h>
#if defined(HYPRE_USING_UMPIRE)
#include "umpire/interface/umpire.h"
#define HYPRE_UMPIRE_POOL_NAME_MAX_LEN 1024
#endif
/* stringification:
* _Pragma(string-literal), so we need to cast argument to a string
* The three dots as last argument of the macro tells compiler that this is a variadic macro.
* I.e. this is a macro that receives variable number of arguments.
*/
#define HYPRE_STR(...) #__VA_ARGS__
#define HYPRE_XSTR(...) HYPRE_STR(__VA_ARGS__)
#ifdef __cplusplus
extern "C" {
#endif
/* Concrete (actual) memory locations, as opposed to the user-facing
 * abstract HYPRE_MemoryLocation; see hypre_GetActualMemLocation(). */
typedef enum _hypre_MemoryLocation
{
   hypre_MEMORY_UNDEFINED = -1,  /* location unknown / not determinable */
   hypre_MEMORY_HOST,            /* ordinary host (CPU) memory */
   hypre_MEMORY_HOST_PINNED,     /* page-locked (pinned) host memory */
   hypre_MEMORY_DEVICE,          /* device (GPU) memory */
   hypre_MEMORY_UNIFIED          /* managed/unified memory (see file header notes) */
} hypre_MemoryLocation;
/*-------------------------------------------------------
 * hypre_GetActualMemLocation
 * Translate the user-facing abstract location into the concrete
 * location selected by the build-time memory model:
 *   HYPRE_MEMORY_HOST   -> always hypre_MEMORY_HOST
 *   HYPRE_MEMORY_DEVICE -> host, device, or unified memory depending on
 *                          which HYPRE_USING_*_MEMORY macro is defined
 * Anything else maps to hypre_MEMORY_UNDEFINED.
 *-------------------------------------------------------*/
static inline hypre_MemoryLocation
hypre_GetActualMemLocation(HYPRE_MemoryLocation location)
{
   if (location == HYPRE_MEMORY_DEVICE)
   {
      /* exactly one of the memory-model macros must be defined */
#if defined(HYPRE_USING_HOST_MEMORY)
      return hypre_MEMORY_HOST;
#elif defined(HYPRE_USING_DEVICE_MEMORY)
      return hypre_MEMORY_DEVICE;
#elif defined(HYPRE_USING_UNIFIED_MEMORY)
      return hypre_MEMORY_UNIFIED;
#else
#error Wrong HYPRE memory setting.
#endif
   }

   return (location == HYPRE_MEMORY_HOST) ? hypre_MEMORY_HOST
                                          : hypre_MEMORY_UNDEFINED;
}
#ifdef HYPRE_USING_MEMORY_TRACKER
/* One record in the memory tracker: a single alloc/realloc/free event. */
typedef struct
{
   char                 _action[16];       /* "malloc", "calloc", "rmalloc", "rfree", or "free" */
   void                *_ptr;              /* address allocated or released */
   size_t               _nbytes;           /* size in bytes; (size_t) -1 when unknown (e.g., at free) */
   hypre_MemoryLocation _memory_location;  /* concrete location of the memory */
   char                 _filename[256];    /* call site: source file */
   char                 _function[256];    /* call site: function name */
   HYPRE_Int            _line;             /* call site: line number */
   size_t               _pair;             /* presumably index of the matching alloc/free entry -- TODO confirm in memory.c */
} hypre_MemoryTrackerEntry;
/* Growable log of hypre_MemoryTrackerEntry records,
 * appended to by hypre_MemoryTrackerInsert(). */
typedef struct
{
   size_t actual_size;   /* entries currently stored -- TODO confirm in memory.c */
   size_t alloced_size;  /* capacity of the data array -- TODO confirm in memory.c */
   size_t prev_end;      /* NOTE(review): purpose not visible here; confirm in memory.c */
   hypre_MemoryTrackerEntry *data;  /* entry storage */
} hypre_MemoryTracker;
/* These Allocs are with memory tracker, for debug.
 * Each macro is a GNU statement expression ( ({ ... }) ) that performs the
 * operation, logs it via hypre_MemoryTrackerInsert() together with the call
 * site (__FILE__/__func__/__LINE__), and yields the typed pointer. */

/* Allocate count objects of type at the given abstract location. */
#define hypre_TAlloc(type, count, location) \
( \
   { \
      void *ptr = hypre_MAlloc((size_t)(sizeof(type) * (count)), location); \
      hypre_MemoryTrackerInsert("malloc", ptr, sizeof(type)*(count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\
      (type *) ptr; \
   } \
)

/* Same, but 'location' is already a concrete hypre_MemoryLocation. */
#define _hypre_TAlloc(type, count, location) \
( \
   { \
      void *ptr = _hypre_MAlloc((size_t)(sizeof(type) * (count)), location); \
      hypre_MemoryTrackerInsert("malloc", ptr, sizeof(type)*(count), location, __FILE__, __func__, __LINE__); \
      (type *) ptr; \
   } \
)

/* Allocate count objects of type, zero-initialized (calloc-style). */
#define hypre_CTAlloc(type, count, location) \
( \
   { \
      void *ptr = hypre_CAlloc((size_t)(count), (size_t)sizeof(type), location); \
      hypre_MemoryTrackerInsert("calloc", ptr, sizeof(type)*(count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\
      (type *) ptr; \
   } \
)

/* Reallocate: logged as an "rfree" of the old block (size unknown, -1)
 * followed by an "rmalloc" of the new block. */
#define hypre_TReAlloc(ptr, type, count, location) \
( \
   { \
      hypre_MemoryTrackerInsert("rfree", ptr, (size_t) -1, hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__); \
      void *new_ptr = hypre_ReAlloc((char *)ptr, (size_t)(sizeof(type) * (count)), location); \
      hypre_MemoryTrackerInsert("rmalloc", new_ptr, sizeof(type)*(count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\
      (type *) new_ptr; \
   } \
)

/* Reallocate with explicit old type/count so the old size can be logged
 * and forwarded to hypre_ReAlloc_v2(). */
#define hypre_TReAlloc_v2(ptr, old_type, old_count, new_type, new_count, location) \
( \
   { \
      hypre_MemoryTrackerInsert("rfree", ptr, sizeof(old_type)*(old_count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__); \
      void *new_ptr = hypre_ReAlloc_v2((char *)ptr, (size_t)(sizeof(old_type)*(old_count)), (size_t)(sizeof(new_type)*(new_count)), location); \
      hypre_MemoryTrackerInsert("rmalloc", new_ptr, sizeof(new_type)*(new_count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\
      (new_type *) new_ptr; \
   } \
)

/* Copy count objects of type from src (in locsrc) to dst (in locdst).
 * Note: copies are not recorded in the tracker. */
#define hypre_TMemcpy(dst, src, type, count, locdst, locsrc) \
( \
   { \
      hypre_Memcpy((void *)(dst), (void *)(src), (size_t)(sizeof(type) * (count)), locdst, locsrc); \
   } \
)

/* Free ptr (logged with unknown size, -1) and null it out. */
#define hypre_TFree(ptr, location) \
( \
   { \
      hypre_MemoryTrackerInsert("free", ptr, (size_t) -1, hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__); \
      hypre_Free((void *)ptr, location); \
      ptr = NULL; \
   } \
)

/* Same, with a concrete hypre_MemoryLocation. */
#define _hypre_TFree(ptr, location) \
( \
   { \
      hypre_MemoryTrackerInsert("free", ptr, (size_t) -1, location, __FILE__, __func__, __LINE__); \
      _hypre_Free((void *)ptr, location); \
      ptr = NULL; \
   } \
)
#else /* #ifdef HYPRE_USING_MEMORY_TRACKER */

/* Untracked versions: thin typed wrappers around the memory.c routines. */

/* Allocate count objects of type at the given abstract location. */
#define hypre_TAlloc(type, count, location) \
( (type *) hypre_MAlloc((size_t)(sizeof(type) * (count)), location) )

/* Same, but 'location' is already a concrete hypre_MemoryLocation. */
#define _hypre_TAlloc(type, count, location) \
( (type *) _hypre_MAlloc((size_t)(sizeof(type) * (count)), location) )

/* Allocate count objects of type, zero-initialized (calloc-style). */
#define hypre_CTAlloc(type, count, location) \
( (type *) hypre_CAlloc((size_t)(count), (size_t)sizeof(type), location) )

/* Reallocate ptr to hold count objects of type. */
#define hypre_TReAlloc(ptr, type, count, location) \
( (type *) hypre_ReAlloc((char *)ptr, (size_t)(sizeof(type) * (count)), location) )

/* Reallocate with explicit old type/count (old size forwarded to v2). */
#define hypre_TReAlloc_v2(ptr, old_type, old_count, new_type, new_count, location) \
( (new_type *) hypre_ReAlloc_v2((char *)ptr, (size_t)(sizeof(old_type)*(old_count)), (size_t)(sizeof(new_type)*(new_count)), location) )

/* Copy count objects of type from src (in locsrc) to dst (in locdst). */
#define hypre_TMemcpy(dst, src, type, count, locdst, locsrc) \
(hypre_Memcpy((void *)(dst), (void *)(src), (size_t)(sizeof(type) * (count)), locdst, locsrc))

/* Free ptr and null it out. */
#define hypre_TFree(ptr, location) \
( hypre_Free((void *)ptr, location), ptr = NULL )

/* Same, with a concrete hypre_MemoryLocation. */
#define _hypre_TFree(ptr, location) \
( _hypre_Free((void *)ptr, location), ptr = NULL )

#endif /* #ifdef HYPRE_USING_MEMORY_TRACKER */
/*--------------------------------------------------------------------------
* Prototypes
*--------------------------------------------------------------------------*/
/* memory.c */
void * hypre_Memset(void *ptr, HYPRE_Int value, size_t num, HYPRE_MemoryLocation location);
void hypre_MemPrefetch(void *ptr, size_t size, HYPRE_MemoryLocation location);
void * hypre_MAlloc(size_t size, HYPRE_MemoryLocation location);
void * hypre_CAlloc( size_t count, size_t elt_size, HYPRE_MemoryLocation location);
void hypre_Free(void *ptr, HYPRE_MemoryLocation location);
void hypre_Memcpy(void *dst, void *src, size_t size, HYPRE_MemoryLocation loc_dst,
HYPRE_MemoryLocation loc_src);
void * hypre_ReAlloc(void *ptr, size_t size, HYPRE_MemoryLocation location);
void * hypre_ReAlloc_v2(void *ptr, size_t old_size, size_t new_size, HYPRE_MemoryLocation location);
void * _hypre_MAlloc(size_t size, hypre_MemoryLocation location);
void _hypre_Free(void *ptr, hypre_MemoryLocation location);
HYPRE_ExecutionPolicy hypre_GetExecPolicy1(HYPRE_MemoryLocation location);
HYPRE_ExecutionPolicy hypre_GetExecPolicy2(HYPRE_MemoryLocation location1,
HYPRE_MemoryLocation location2);
HYPRE_Int hypre_GetPointerLocation(const void *ptr, hypre_MemoryLocation *memory_location);
HYPRE_Int hypre_PrintMemoryTracker();
HYPRE_Int hypre_SetCubMemPoolSize( hypre_uint bin_growth, hypre_uint min_bin, hypre_uint max_bin,
size_t max_cached_bytes );
HYPRE_Int hypre_umpire_host_pooled_allocate(void **ptr, size_t nbytes);
HYPRE_Int hypre_umpire_host_pooled_free(void *ptr);
void *hypre_umpire_host_pooled_realloc(void *ptr, size_t size);
HYPRE_Int hypre_umpire_device_pooled_allocate(void **ptr, size_t nbytes);
HYPRE_Int hypre_umpire_device_pooled_free(void *ptr);
HYPRE_Int hypre_umpire_um_pooled_allocate(void **ptr, size_t nbytes);
HYPRE_Int hypre_umpire_um_pooled_free(void *ptr);
HYPRE_Int hypre_umpire_pinned_pooled_allocate(void **ptr, size_t nbytes);
HYPRE_Int hypre_umpire_pinned_pooled_free(void *ptr);
#ifdef HYPRE_USING_MEMORY_TRACKER
hypre_MemoryTracker * hypre_MemoryTrackerCreate();
void hypre_MemoryTrackerDestroy(hypre_MemoryTracker *tracker);
void hypre_MemoryTrackerInsert(const char *action, void *ptr, size_t nbytes,
hypre_MemoryLocation memory_location, const char *filename, const char *function, HYPRE_Int line);
HYPRE_Int hypre_PrintMemoryTracker();
#endif
/* memory_dmalloc.c */
HYPRE_Int hypre_InitMemoryDebugDML( HYPRE_Int id );
HYPRE_Int hypre_FinalizeMemoryDebugDML( void );
char *hypre_MAllocDML( HYPRE_Int size, char *file, HYPRE_Int line );
char *hypre_CAllocDML( HYPRE_Int count, HYPRE_Int elt_size, char *file, HYPRE_Int line );
char *hypre_ReAllocDML( char *ptr, HYPRE_Int size, char *file, HYPRE_Int line );
void hypre_FreeDML( char *ptr, char *file, HYPRE_Int line );
/* GPU malloc prototype */
typedef void (*GPUMallocFunc)(void **, size_t);
typedef void (*GPUMfreeFunc)(void *);
#ifdef __cplusplus
}
#endif
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef HYPRE_OMP_DEVICE_H
#define HYPRE_OMP_DEVICE_H
#if defined(HYPRE_USING_DEVICE_OPENMP)
#include "omp.h"
/* OpenMP 4.5 device memory management */
extern HYPRE_Int hypre__global_offload;
extern HYPRE_Int hypre__offload_device_num;
extern HYPRE_Int hypre__offload_host_num;
/* stats */
extern size_t hypre__target_allc_count;
extern size_t hypre__target_free_count;
extern size_t hypre__target_allc_bytes;
extern size_t hypre__target_free_bytes;
extern size_t hypre__target_htod_count;
extern size_t hypre__target_dtoh_count;
extern size_t hypre__target_htod_bytes;
extern size_t hypre__target_dtoh_bytes;
/* CHECK MODE: check if offloading has effect (turned on when configured with --enable-debug)
* if we ``enter'' an address, it should not exist in device [o.w NO EFFECT]
* if we ``exit'' or ''update'' an address, it should exist in device [o.w ERROR]
* hypre__offload_flag: 0 == OK; 1 == WRONG
*/
#ifdef HYPRE_DEVICE_OPENMP_CHECK
#define HYPRE_OFFLOAD_FLAG(devnum, hptr, type) HYPRE_Int hypre__offload_flag = (type[1] == 'n') == omp_target_is_present(hptr, devnum);
#else
#define HYPRE_OFFLOAD_FLAG(...) HYPRE_Int hypre__offload_flag = 0; /* non-debug mode, always OK */
#endif
/* OMP 4.5 offloading macro: issue one "omp target" data-mapping operation
 * (enter/exit/update) on count elements of hptr starting at offset, and
 * update the global transfer statistics declared above.  The operation is
 * selected by inspecting type1[1] and type2[0] (see the legend below). */
#define hypre_omp_device_offload(devnum, hptr, datatype, offset, count, type1, type2) \
{\
   /* devnum: device number \
    * hptr: host pointer \
    * datatype \
    * type1: ``e(n)ter'', ''e(x)it'', or ``u(p)date'' \
    * type2: ``(a)lloc'', ``(t)o'', ``(d)elete'', ''(f)rom'' \
    */ \
   datatype *hypre__offload_hptr = (datatype *) hptr; \
   /* if hypre__global_offload == 0, or
    * hptr (host pointer) == NULL,
    * this offload will be IGNORED */ \
   if (hypre__global_offload && hypre__offload_hptr != NULL) { \
      /* offloading offset and size (in datatype) */ \
      size_t hypre__offload_offset = offset, hypre__offload_size = count; \
      /* in the CHECK mode, we test if this offload has effect */ \
      HYPRE_OFFLOAD_FLAG(devnum, hypre__offload_hptr, type1) \
      if (hypre__offload_flag) { \
         printf("[!NO Effect! %s %d] device %d target: %6s %6s, data %p, [%ld:%ld]\n", __FILE__, __LINE__, devnum, type1, type2, (void *)hypre__offload_hptr, hypre__offload_offset, hypre__offload_size); exit(0); \
      } else { \
         size_t offload_bytes = count * sizeof(datatype); \
         /* printf("[ %s %d] device %d target: %6s %6s, data %p, [%d:%d]\n", __FILE__, __LINE__, devnum, type1, type2, (void *)hypre__offload_hptr, hypre__offload_offset, hypre__offload_size); */ \
         if (type1[1] == 'n' && type2[0] == 't') { \
            /* enter to: map(to:) allocates on device and copies host->device */\
            hypre__target_allc_count ++; \
            hypre__target_allc_bytes += offload_bytes; \
            hypre__target_htod_count ++; \
            hypre__target_htod_bytes += offload_bytes; \
            _Pragma (HYPRE_XSTR(omp target enter data map(to:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \
         } else if (type1[1] == 'n' && type2[0] == 'a') { \
            /* enter alloc: map(alloc:) allocates on device, no copy */ \
            hypre__target_allc_count ++; \
            hypre__target_allc_bytes += offload_bytes; \
            _Pragma (HYPRE_XSTR(omp target enter data map(alloc:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \
         } else if (type1[1] == 'x' && type2[0] == 'd') { \
            /* exit delete: map(delete:) releases the device copy, no copy back */\
            hypre__target_free_count ++; \
            hypre__target_free_bytes += offload_bytes; \
            _Pragma (HYPRE_XSTR(omp target exit data map(delete:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \
         } else if (type1[1] == 'x' && type2[0] == 'f') {\
            /* exit from: map(from:) copies device->host, then releases */ \
            hypre__target_free_count ++; \
            hypre__target_free_bytes += offload_bytes; \
            hypre__target_dtoh_count ++; \
            hypre__target_dtoh_bytes += offload_bytes; \
            _Pragma (HYPRE_XSTR(omp target exit data map(from:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \
         } else if (type1[1] == 'p' && type2[0] == 't') { \
            /* update to: refresh the device copy from the host */ \
            hypre__target_htod_count ++; \
            hypre__target_htod_bytes += offload_bytes; \
            _Pragma (HYPRE_XSTR(omp target update to(hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \
         } else if (type1[1] == 'p' && type2[0] == 'f') {\
            /* update from: refresh the host copy from the device */ \
            hypre__target_dtoh_count ++; \
            hypre__target_dtoh_bytes += offload_bytes; \
            _Pragma (HYPRE_XSTR(omp target update from(hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \
         } else {\
            printf("error: unrecognized offloading type combination!\n"); exit(-1); \
         } \
      } \
   } \
}
HYPRE_Int HYPRE_OMPOffload(HYPRE_Int device, void *ptr, size_t num, const char *type1,
const char *type2);
HYPRE_Int HYPRE_OMPPtrIsMapped(void *p, HYPRE_Int device_num);
HYPRE_Int HYPRE_OMPOffloadOn();
HYPRE_Int HYPRE_OMPOffloadOff();
HYPRE_Int HYPRE_OMPOffloadStatPrint();
#endif /* HYPRE_USING_DEVICE_OPENMP */
#endif /* HYPRE_OMP_DEVICE_H */
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef hypre_THREADING_HEADER
#define hypre_THREADING_HEADER
#ifdef HYPRE_USING_OPENMP
/* Real implementations when OpenMP is enabled (see threading sources). */
HYPRE_Int hypre_NumThreads( void );
HYPRE_Int hypre_NumActiveThreads( void );
HYPRE_Int hypre_GetThreadNum( void );
void hypre_SetNumThreads(HYPRE_Int nt);
#else
/* Serial build: one thread, thread id 0, setting the count is a no-op. */
#define hypre_NumThreads() 1
#define hypre_NumActiveThreads() 1
#define hypre_GetThreadNum() 0
#define hypre_SetNumThreads(x)
#endif
void hypre_GetSimpleThreadPartition( HYPRE_Int *begin, HYPRE_Int *end, HYPRE_Int n );
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header file for doing timing
*
*****************************************************************************/
#ifndef HYPRE_TIMING_HEADER
#define HYPRE_TIMING_HEADER
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
/*--------------------------------------------------------------------------
* Prototypes for low-level timing routines
*--------------------------------------------------------------------------*/
/* timer.c */
HYPRE_Real time_getWallclockSeconds( void );
HYPRE_Real time_getCPUSeconds( void );
HYPRE_Real time_get_wallclock_seconds_( void );
HYPRE_Real time_get_cpu_seconds_( void );
/*--------------------------------------------------------------------------
* With timing off
*--------------------------------------------------------------------------*/
#ifndef HYPRE_TIMING
#define hypre_InitializeTiming(name) 0
#define hypre_FinalizeTiming(index)
#define hypre_IncFLOPCount(inc)
#define hypre_BeginTiming(i)
#define hypre_EndTiming(i)
#define hypre_PrintTiming(heading, comm)
#define hypre_ClearTiming()
/*--------------------------------------------------------------------------
* With timing on
*--------------------------------------------------------------------------*/
#else
/*-------------------------------------------------------
 * Global timing structure
 * Parallel arrays indexed by the time index returned
 * from hypre_InitializeTiming().
 *-------------------------------------------------------*/
typedef struct
{
   HYPRE_Real *wall_time;  /* per-timer wall-clock total */
   HYPRE_Real *cpu_time;   /* per-timer CPU-time total */
   HYPRE_Real *flops;      /* per-timer flop count */
   char **name;            /* per-timer label */
   HYPRE_Int *state;       /* boolean flag to allow for recursive timing */
   HYPRE_Int *num_regs;    /* count of how many times a name is registered */
   HYPRE_Int num_names;    /* number of timer names in use -- TODO confirm in timing.c */
   HYPRE_Int size;         /* capacity of the arrays -- TODO confirm in timing.c */
   HYPRE_Real wall_count;  /* running accumulator (see hypre_TimingWallCount) */
   HYPRE_Real CPU_count;   /* running accumulator (see hypre_TimingCPUCount) */
   HYPRE_Real FLOP_count;  /* running accumulator (see hypre_TimingFLOPCount) */
} hypre_TimingType;
#ifdef HYPRE_TIMING_GLOBALS
hypre_TimingType *hypre_global_timing = NULL;
#else
extern hypre_TimingType *hypre_global_timing;
#endif
/*-------------------------------------------------------
* Accessor functions
*-------------------------------------------------------*/
#define hypre_TimingWallTime(i) (hypre_global_timing -> wall_time[(i)])
#define hypre_TimingCPUTime(i) (hypre_global_timing -> cpu_time[(i)])
#define hypre_TimingFLOPS(i) (hypre_global_timing -> flops[(i)])
#define hypre_TimingName(i) (hypre_global_timing -> name[(i)])
#define hypre_TimingState(i) (hypre_global_timing -> state[(i)])
#define hypre_TimingNumRegs(i) (hypre_global_timing -> num_regs[(i)])
#define hypre_TimingWallCount (hypre_global_timing -> wall_count)
#define hypre_TimingCPUCount (hypre_global_timing -> CPU_count)
#define hypre_TimingFLOPCount (hypre_global_timing -> FLOP_count)
/*-------------------------------------------------------
* Prototypes
*-------------------------------------------------------*/
/* timing.c */
HYPRE_Int hypre_InitializeTiming( const char *name );
HYPRE_Int hypre_FinalizeTiming( HYPRE_Int time_index );
HYPRE_Int hypre_IncFLOPCount( HYPRE_BigInt inc );
HYPRE_Int hypre_BeginTiming( HYPRE_Int time_index );
HYPRE_Int hypre_EndTiming( HYPRE_Int time_index );
HYPRE_Int hypre_ClearTiming( void );
HYPRE_Int hypre_PrintTiming( const char *heading, MPI_Comm comm );
#endif
#ifdef __cplusplus
}
#endif
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header file link lists
*
*****************************************************************************/
#ifndef HYPRE_LINKLIST_HEADER
#define HYPRE_LINKLIST_HEADER
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Element of a doubly linked list of integers (used by amg_linklist.c).
 * NOTE(review): the role of 'head'/'tail' per element is not visible
 * here -- presumably sentinel/bookkeeping markers; confirm in
 * amg_linklist.c. */
struct double_linked_list
{
   HYPRE_Int data;                       /* payload value */
   struct double_linked_list *next_elt;  /* next element */
   struct double_linked_list *prev_elt;  /* previous element */
   HYPRE_Int head;
   HYPRE_Int tail;
};
typedef struct double_linked_list hypre_ListElement;
/* A list is referenced by a pointer to an element. */
typedef hypre_ListElement *hypre_LinkList;
#ifdef __cplusplus
}
#endif
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef hypre_EXCHANGE_DATA_HEADER
#define hypre_EXCHANGE_DATA_HEADER
/* Accessor macros for hypre_BinaryTree */
#define hypre_BinaryTreeParentId(tree) (tree->parent_id)
#define hypre_BinaryTreeNumChild(tree) (tree->num_child)
#define hypre_BinaryTreeChildIds(tree) (tree->child_id)
#define hypre_BinaryTreeChildId(tree, i) (tree->child_id[i])

/* Node of a binary tree over processes, built by hypre_CreateBinaryTree();
 * ids are presumably MPI ranks -- confirm in the exchange_data sources. */
typedef struct
{
   HYPRE_Int parent_id;  /* id of this node's parent */
   HYPRE_Int num_child;  /* number of children */
   HYPRE_Int *child_id;  /* ids of the children (length num_child) */
} hypre_BinaryTree;
/* In the fill_response() function the user needs to set the recv__buf
and the response_message_size. Memory of size send_response_storage has been
   allocated for the send_buf (in exchange_data) - if more is needed, then
realloc and adjust
the send_response_storage. The realloc amount should be storage+overhead.
If the response is an empty "confirmation" message, then set
response_message_size =0 (and do not modify the send_buf) */
/* Response descriptor for hypre_DataExchangeList(); see the usage notes
 * in the comment above. */
typedef struct
{
   /* Callback invoked for each received contact message; it must set
    * *response_buf and *response_message_size. */
   HYPRE_Int (*fill_response)(void* recv_buf, HYPRE_Int contact_size,
                              HYPRE_Int contact_proc, void* response_obj,
                              MPI_Comm comm, void** response_buf,
                              HYPRE_Int* response_message_size);
   HYPRE_Int send_response_overhead; /*set by exchange data */
   HYPRE_Int send_response_storage;  /*storage allocated for send_response_buf*/
   void *data1;                      /*data fields user may want to access in fill_response */
   void *data2;
} hypre_DataExchangeResponse;
HYPRE_Int hypre_CreateBinaryTree(HYPRE_Int, HYPRE_Int, hypre_BinaryTree*);
HYPRE_Int hypre_DestroyBinaryTree(hypre_BinaryTree*);
HYPRE_Int hypre_DataExchangeList(HYPRE_Int num_contacts, HYPRE_Int *contact_proc_list,
void *contact_send_buf, HYPRE_Int *contact_send_buf_starts, HYPRE_Int contact_obj_size,
HYPRE_Int response_obj_size, hypre_DataExchangeResponse *response_obj, HYPRE_Int max_response_size,
HYPRE_Int rnum, MPI_Comm comm, void **p_response_recv_buf, HYPRE_Int **p_response_recv_buf_starts);
#endif /* end of header */
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header file for Caliper instrumentation macros
*
*****************************************************************************/
#ifndef CALIPER_INSTRUMENTATION_HEADER
#define CALIPER_INSTRUMENTATION_HEADER
#include "HYPRE_config.h"
#ifdef HYPRE_USING_CALIPER

#ifdef __cplusplus
extern "C++"
{
#endif

#include <caliper/cali.h>

#ifdef __cplusplus
}
#endif

/* Scratch buffers used to build formatted annotation names.
 * NOTE(review): hypre__levelname is filled with "MG level %d"; 16 bytes
 * only fits level numbers up to 6 digits -- confirm levels stay small. */
static char hypre__levelname[16];
static char hypre__markname[1024];

#define HYPRE_ANNOTATE_FUNC_BEGIN          CALI_MARK_FUNCTION_BEGIN
#define HYPRE_ANNOTATE_FUNC_END            CALI_MARK_FUNCTION_END
#define HYPRE_ANNOTATE_LOOP_BEGIN(id, str) CALI_MARK_LOOP_BEGIN(id, str)
#define HYPRE_ANNOTATE_LOOP_END(id)        CALI_MARK_LOOP_END(id)
#define HYPRE_ANNOTATE_ITER_BEGIN(id, it)  CALI_MARK_ITERATION_BEGIN(id, it)
#define HYPRE_ANNOTATE_ITER_END(id)        CALI_MARK_ITERATION_END(id)
/* Open/close a Caliper region whose name is a printf-style format string. */
#define HYPRE_ANNOTATE_REGION_BEGIN(...)\
{\
   hypre_sprintf(hypre__markname, __VA_ARGS__);\
   CALI_MARK_BEGIN(hypre__markname);\
}
#define HYPRE_ANNOTATE_REGION_END(...)\
{\
   hypre_sprintf(hypre__markname, __VA_ARGS__);\
   CALI_MARK_END(hypre__markname);\
}
/* Fix: HYPRE_ANNOTATE_MAX_MGLEVEL was only defined in the #else branch
 * below, so any caller failed to compile in a Caliper-enabled build.
 * Defined here as a no-op to keep both configurations consistent. */
#define HYPRE_ANNOTATE_MAX_MGLEVEL(lvl)
/* Open/close a region named "MG level <lvl>". */
#define HYPRE_ANNOTATE_MGLEVEL_BEGIN(lvl)\
{\
   hypre_sprintf(hypre__levelname, "MG level %d", lvl);\
   CALI_MARK_BEGIN(hypre__levelname);\
}
#define HYPRE_ANNOTATE_MGLEVEL_END(lvl)\
{\
   hypre_sprintf(hypre__levelname, "MG level %d", lvl);\
   CALI_MARK_END(hypre__levelname);\
}

#else /* Caliper disabled: all annotations compile away to nothing */

#define HYPRE_ANNOTATE_FUNC_BEGIN
#define HYPRE_ANNOTATE_FUNC_END
#define HYPRE_ANNOTATE_LOOP_BEGIN(id, str)
#define HYPRE_ANNOTATE_LOOP_END(id)
#define HYPRE_ANNOTATE_ITER_BEGIN(id, it)
#define HYPRE_ANNOTATE_ITER_END(id)
#define HYPRE_ANNOTATE_REGION_BEGIN(...)
#define HYPRE_ANNOTATE_REGION_END(...)
#define HYPRE_ANNOTATE_MAX_MGLEVEL(lvl)
#define HYPRE_ANNOTATE_MGLEVEL_BEGIN(lvl)
#define HYPRE_ANNOTATE_MGLEVEL_END(lvl)

#endif
#endif /* CALIPER_INSTRUMENTATION_HEADER */
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* General structures and values
*
*****************************************************************************/
#ifndef HYPRE_HANDLE_H
#define HYPRE_HANDLE_H
/* Opaque GPU-related state; defined in the CUDA utilities sources. */
struct hypre_CudaData;
typedef struct hypre_CudaData hypre_CudaData;

/* Global hypre library state; accessed through the hypre_Handle*
 * macros below. */
typedef struct
{
   HYPRE_Int hypre_error;                      /* library error code */
   HYPRE_MemoryLocation memory_location;       /* default memory location */
   HYPRE_ExecutionPolicy default_exec_policy;  /* default host/device execution policy */
   HYPRE_ExecutionPolicy struct_exec_policy;   /* execution policy for the struct interface */
#if defined(HYPRE_USING_GPU)
   hypre_CudaData *cuda_data;  /* streams, library handles, SpGEMM options, ... */
   /* device G-S options */
   HYPRE_Int device_gs_method;
#endif
#if defined(HYPRE_USING_UMPIRE)
   /* Umpire pool configuration: per-pool name, size, and an ownership
    * flag saying whether hypre created (and must destroy) the pool. */
   char umpire_device_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   char umpire_um_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   char umpire_host_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   char umpire_pinned_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   size_t umpire_device_pool_size;
   size_t umpire_um_pool_size;
   size_t umpire_host_pool_size;
   size_t umpire_pinned_pool_size;
   size_t umpire_block_size;
   HYPRE_Int own_umpire_device_pool;
   HYPRE_Int own_umpire_um_pool;
   HYPRE_Int own_umpire_host_pool;
   HYPRE_Int own_umpire_pinned_pool;
   umpire_resourcemanager umpire_rm;  /* Umpire resource manager instance */
#endif
   /* user malloc/free function pointers */
   GPUMallocFunc user_device_malloc;
   GPUMfreeFunc user_device_free;
} hypre_Handle;
/* accessor macros to hypre_Handle */
#define hypre_HandleMemoryLocation(hypre_handle) ((hypre_handle) -> memory_location)
#define hypre_HandleDefaultExecPolicy(hypre_handle) ((hypre_handle) -> default_exec_policy)
#define hypre_HandleStructExecPolicy(hypre_handle) ((hypre_handle) -> struct_exec_policy)
#define hypre_HandleCudaData(hypre_handle) ((hypre_handle) -> cuda_data)
#define hypre_HandleDeviceGSMethod(hypre_handle) ((hypre_handle) -> device_gs_method)
#define hypre_HandleCurandGenerator(hypre_handle) hypre_CudaDataCurandGenerator(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCublasHandle(hypre_handle) hypre_CudaDataCublasHandle(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCusparseHandle(hypre_handle) hypre_CudaDataCusparseHandle(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCudaComputeStream(hypre_handle) hypre_CudaDataCudaComputeStream(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCubBinGrowth(hypre_handle) hypre_CudaDataCubBinGrowth(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCubMinBin(hypre_handle) hypre_CudaDataCubMinBin(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCubMaxBin(hypre_handle) hypre_CudaDataCubMaxBin(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCubMaxCachedBytes(hypre_handle) hypre_CudaDataCubMaxCachedBytes(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCubDevAllocator(hypre_handle) hypre_CudaDataCubDevAllocator(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCubUvmAllocator(hypre_handle) hypre_CudaDataCubUvmAllocator(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCudaDevice(hypre_handle) hypre_CudaDataCudaDevice(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCudaComputeStreamNum(hypre_handle) hypre_CudaDataCudaComputeStreamNum(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleCudaReduceBuffer(hypre_handle) hypre_CudaDataCudaReduceBuffer(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleStructCommRecvBuffer(hypre_handle) hypre_CudaDataStructCommRecvBuffer(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleStructCommSendBuffer(hypre_handle) hypre_CudaDataStructCommSendBuffer(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleStructCommRecvBufferSize(hypre_handle) hypre_CudaDataStructCommRecvBufferSize(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleStructCommSendBufferSize(hypre_handle) hypre_CudaDataStructCommSendBufferSize(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleSpgemmUseCusparse(hypre_handle) hypre_CudaDataSpgemmUseCusparse(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleSpgemmAlgorithm(hypre_handle) hypre_CudaDataSpgemmAlgorithm(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleSpgemmRownnzEstimateMethod(hypre_handle) hypre_CudaDataSpgemmRownnzEstimateMethod(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleSpgemmRownnzEstimateNsamples(hypre_handle) hypre_CudaDataSpgemmRownnzEstimateNsamples(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleSpgemmRownnzEstimateMultFactor(hypre_handle) hypre_CudaDataSpgemmRownnzEstimateMultFactor(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleSpgemmHashType(hypre_handle) hypre_CudaDataSpgemmHashType(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleDeviceAllocator(hypre_handle) hypre_CudaDataDeviceAllocator(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleUseGpuRand(hypre_handle) hypre_CudaDataUseGpuRand(hypre_HandleCudaData(hypre_handle))
#define hypre_HandleUserDeviceMalloc(hypre_handle) ((hypre_handle) -> user_device_malloc)
#define hypre_HandleUserDeviceMfree(hypre_handle) ((hypre_handle) -> user_device_free)
#define hypre_HandleUmpireResourceMan(hypre_handle) ((hypre_handle) -> umpire_rm)
#define hypre_HandleUmpireDevicePoolSize(hypre_handle) ((hypre_handle) -> umpire_device_pool_size)
#define hypre_HandleUmpireUMPoolSize(hypre_handle) ((hypre_handle) -> umpire_um_pool_size)
#define hypre_HandleUmpireHostPoolSize(hypre_handle) ((hypre_handle) -> umpire_host_pool_size)
#define hypre_HandleUmpirePinnedPoolSize(hypre_handle) ((hypre_handle) -> umpire_pinned_pool_size)
#define hypre_HandleUmpireBlockSize(hypre_handle) ((hypre_handle) -> umpire_block_size)
#define hypre_HandleUmpireDevicePoolName(hypre_handle) ((hypre_handle) -> umpire_device_pool_name)
#define hypre_HandleUmpireUMPoolName(hypre_handle) ((hypre_handle) -> umpire_um_pool_name)
#define hypre_HandleUmpireHostPoolName(hypre_handle) ((hypre_handle) -> umpire_host_pool_name)
#define hypre_HandleUmpirePinnedPoolName(hypre_handle) ((hypre_handle) -> umpire_pinned_pool_name)
#define hypre_HandleOwnUmpireDevicePool(hypre_handle) ((hypre_handle) -> own_umpire_device_pool)
#define hypre_HandleOwnUmpireUMPool(hypre_handle) ((hypre_handle) -> own_umpire_um_pool)
#define hypre_HandleOwnUmpireHostPool(hypre_handle) ((hypre_handle) -> own_umpire_host_pool)
#define hypre_HandleOwnUmpirePinnedPool(hypre_handle) ((hypre_handle) -> own_umpire_pinned_pool)
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef HYPRE_GSELIM_H
#define HYPRE_GSELIM_H
/* hypre_gselim(A, x, n, error):
 * Solve the dense n x n linear system A*x = b in place by Gaussian
 * elimination WITHOUT pivoting.  On entry x holds the right-hand side;
 * on exit x holds the solution.  A is stored row-major (A[i*n+j]) and is
 * overwritten by the elimination.  'error' is set to 0 and incremented
 * only in the 1x1 case when A[0] == 0; in the n x n path a zero pivot is
 * silently skipped.
 * NOTE(review): without pivoting this is only reliable for matrices that
 * need no row exchanges -- presumably the small diagonally-dominant
 * blocks hypre passes here; confirm at call sites. */
#define hypre_gselim(A,x,n,error) \
{ \
   HYPRE_Int j,k,m; \
   HYPRE_Real factor; \
   HYPRE_Real divA; \
   error = 0; \
   if (n == 1) /* A is 1x1 */ \
   { \
      if (A[0] != 0.0) \
      { \
         x[0] = x[0]/A[0]; \
      } \
      else \
      { \
         error++; \
      } \
   } \
   else/* A is nxn. Forward elimination */ \
   { \
      for (k = 0; k < n-1; k++) \
      { \
         if (A[k*n+k] != 0.0) \
         { \
            divA = 1.0/A[k*n+k]; \
            for (j = k+1; j < n; j++) \
            { \
               if (A[j*n+k] != 0.0) \
               { \
                  factor = A[j*n+k]*divA; \
                  for (m = k+1; m < n; m++) \
                  { \
                     A[j*n+m] -= factor * A[k*n+m]; \
                  } \
                  x[j] -= factor * x[k]; \
               } \
            } \
         } \
      } \
      /* Back Substitution */ \
      for (k = n-1; k > 0; --k) \
      { \
         if (A[k*n+k] != 0.0) \
         { \
            x[k] /= A[k*n+k]; \
            for (j = 0; j < k; j++) \
            { \
               if (A[j*n+k] != 0.0) \
               { \
                  x[j] -= x[k] * A[j*n+k]; \
               } \
            } \
         } \
      } \
      if (A[0] != 0.0) x[0] /= A[0]; \
   } \
}
#endif /* #ifndef HYPRE_GSELIM_H */
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header file for hypre_IntArray struct for holding an array of integers
*
*****************************************************************************/
#ifndef hypre_INTARRAY_HEADER
#define hypre_INTARRAY_HEADER
/*--------------------------------------------------------------------------
* hypre_IntArray
*--------------------------------------------------------------------------*/
/* Simple array of HYPRE_Int values with an associated memory location. */
typedef struct
{
   /* pointer to data and size of data */
   HYPRE_Int *data;  /* array storage */
   HYPRE_Int size;   /* number of entries */
   /* memory location of array data */
   HYPRE_MemoryLocation memory_location;
} hypre_IntArray;
/*--------------------------------------------------------------------------
 * Accessor functions for the IntArray structure
 *--------------------------------------------------------------------------*/
#define hypre_IntArrayData(array)           ((array) -> data)
#define hypre_IntArraySize(array)           ((array) -> size)
#define hypre_IntArrayMemoryLocation(array) ((array) -> memory_location)
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/* amg_linklist.c */
void hypre_dispose_elt ( hypre_LinkList element_ptr );
void hypre_remove_point ( hypre_LinkList *LoL_head_ptr, hypre_LinkList *LoL_tail_ptr,
HYPRE_Int measure, HYPRE_Int index, HYPRE_Int *lists, HYPRE_Int *where );
hypre_LinkList hypre_create_elt ( HYPRE_Int Item );
void hypre_enter_on_lists ( hypre_LinkList *LoL_head_ptr, hypre_LinkList *LoL_tail_ptr,
HYPRE_Int measure, HYPRE_Int index, HYPRE_Int *lists, HYPRE_Int *where );
/* binsearch.c */
HYPRE_Int hypre_BinarySearch ( HYPRE_Int *list, HYPRE_Int value, HYPRE_Int list_length );
HYPRE_Int hypre_BigBinarySearch ( HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length );
HYPRE_Int hypre_BinarySearch2 ( HYPRE_Int *list, HYPRE_Int value, HYPRE_Int low, HYPRE_Int high,
HYPRE_Int *spot );
HYPRE_Int *hypre_LowerBound( HYPRE_Int *first, HYPRE_Int *last, HYPRE_Int value );
HYPRE_BigInt *hypre_BigLowerBound( HYPRE_BigInt *first, HYPRE_BigInt *last, HYPRE_BigInt value );
/* log.c */
HYPRE_Int hypre_Log2( HYPRE_Int p );
/* complex.c */
#ifdef HYPRE_COMPLEX
HYPRE_Complex hypre_conj( HYPRE_Complex value );
HYPRE_Real hypre_cabs( HYPRE_Complex value );
HYPRE_Real hypre_creal( HYPRE_Complex value );
HYPRE_Real hypre_cimag( HYPRE_Complex value );
#else
#define hypre_conj(value) value
#define hypre_cabs(value) fabs(value)
#define hypre_creal(value) value
#define hypre_cimag(value) 0.0
#endif
/* general.c */
#ifdef HYPRE_USING_MEMORY_TRACKER
hypre_MemoryTracker* hypre_memory_tracker();
#endif
hypre_Handle* hypre_handle();
hypre_Handle* hypre_HandleCreate();
HYPRE_Int hypre_HandleDestroy(hypre_Handle *hypre_handle_);
HYPRE_Int hypre_SetDevice(hypre_int device_id, hypre_Handle *hypre_handle_);
HYPRE_Int hypre_GetDevice(hypre_int *device_id);
HYPRE_Int hypre_GetDeviceCount(hypre_int *device_count);
HYPRE_Int hypre_GetDeviceLastError();
HYPRE_Int hypre_UmpireInit(hypre_Handle *hypre_handle_);
HYPRE_Int hypre_UmpireFinalize(hypre_Handle *hypre_handle_);
/* qsort.c */
void hypre_swap ( HYPRE_Int *v, HYPRE_Int i, HYPRE_Int j );
void hypre_swap_c ( HYPRE_Complex *v, HYPRE_Int i, HYPRE_Int j );
void hypre_swap2 ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigSwap2 ( HYPRE_BigInt *v, HYPRE_Real *w, HYPRE_Int i, HYPRE_Int j );
void hypre_swap2i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigSwap2i ( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_swap3i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_swap3_d ( HYPRE_Real *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_swap3_d_perm(HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_BigSwap4_d ( HYPRE_Real *v, HYPRE_BigInt *w, HYPRE_Int *z, HYPRE_Int *y, HYPRE_Int i,
HYPRE_Int j );
void hypre_swap_d ( HYPRE_Real *v, HYPRE_Int i, HYPRE_Int j );
void hypre_qsort0 ( HYPRE_Int *v, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort1 ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigQsort1 ( HYPRE_BigInt *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort2i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigQsort2i( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort2 ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort2_abs ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3ir ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3( HYPRE_Real *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3_abs ( HYPRE_Real *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left,
HYPRE_Int right );
void hypre_BigQsort4_abs ( HYPRE_Real *v, HYPRE_BigInt *w, HYPRE_Int *z, HYPRE_Int *y,
HYPRE_Int left, HYPRE_Int right );
void hypre_qsort_abs ( HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigSwapbi(HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsortbi( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigSwapLoc(HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsortbLoc( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigSwapb2i(HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsortb2i( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left,
HYPRE_Int right );
void hypre_BigSwap( HYPRE_BigInt *v, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsort0( HYPRE_BigInt *v, HYPRE_Int left, HYPRE_Int right );
void hypre_topo_sort(const HYPRE_Int *row_ptr, const HYPRE_Int *col_inds, const HYPRE_Complex *data,
HYPRE_Int *ordering, HYPRE_Int n);
void hypre_dense_topo_sort(const HYPRE_Complex *L, HYPRE_Int *ordering, HYPRE_Int n,
HYPRE_Int is_col_major);
/* qsplit.c */
HYPRE_Int hypre_DoubleQuickSplit ( HYPRE_Real *values, HYPRE_Int *indices, HYPRE_Int list_length,
HYPRE_Int NumberKept );
/* random.c */
/* HYPRE_CUDA_GLOBAL */ void hypre_SeedRand ( HYPRE_Int seed );
/* HYPRE_CUDA_GLOBAL */ HYPRE_Int hypre_RandI ( void );
/* HYPRE_CUDA_GLOBAL */ HYPRE_Real hypre_Rand ( void );
/* prefix_sum.c */
/**
* Assumed to be called within an omp region.
* Let x_i be the input of ith thread.
* The output of ith thread y_i = x_0 + x_1 + ... + x_{i-1}
* Additionally, sum = x_0 + x_1 + ... + x_{nthreads - 1}
* Note that always y_0 = 0
*
* @param workspace at least with length (nthreads+1)
* workspace[tid] will contain result for tid
* workspace[nthreads] will contain sum
*/
void hypre_prefix_sum(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int *workspace);
/**
* This version does prefix sum in pair.
* Useful when we prefix sum of diag and offd in tandem.
*
 * @param workspace at least with length 2*(nthreads+1)
* workspace[2*tid] and workspace[2*tid+1] will contain results for tid
 * workspace[2*nthreads] and workspace[2*nthreads + 1] will contain sums
*/
void hypre_prefix_sum_pair(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2, HYPRE_Int *sum2,
HYPRE_Int *workspace);
/**
* @param workspace at least with length 3*(nthreads+1)
* workspace[3*tid:3*tid+3) will contain results for tid
*/
void hypre_prefix_sum_triple(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2,
HYPRE_Int *sum2, HYPRE_Int *in_out3, HYPRE_Int *sum3, HYPRE_Int *workspace);
/**
* n prefix-sums together.
* workspace[n*tid:n*(tid+1)) will contain results for tid
 * workspace[n*nthreads:n*(nthreads+1)) will contain sums
*
* @param workspace at least with length n*(nthreads+1)
*/
void hypre_prefix_sum_multiple(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int n,
HYPRE_Int *workspace);
/* hopscotch_hash.c */
#ifdef HYPRE_USING_OPENMP
/* Check if atomic operations are available to use concurrent hopscotch hash table */
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
#define HYPRE_USING_ATOMIC
//#elif defined _MSC_VER // JSP: haven't tested, so comment out for now
//#define HYPRE_USING_ATOMIC
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//#define HYPRE_USING_ATOMIC
//#include <stdatomic.h>
#endif
#endif // HYPRE_USING_OPENMP
#ifdef HYPRE_HOPSCOTCH
#ifdef HYPRE_USING_ATOMIC
// concurrent hopscotch hashing is possible only with atomic supports
#define HYPRE_CONCURRENT_HOPSCOTCH
#endif
#endif
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* One lock "segment" of the concurrent hopscotch tables.  timestamp is
 * incremented whenever an entry is displaced within the segment (see the
 * FindCloserFreeBucket helpers below), letting lock-free readers detect
 * concurrent moves. */
typedef struct
{
   HYPRE_Int volatile timestamp;
   omp_lock_t lock;
} hypre_HopscotchSegment;
#endif
/**
 * The current typical use case of unordered set is putting input sequence
 * with lots of duplication (putting all colidx received from other ranks),
 * followed by one sweep of enumeration.
 * Since the capacity is set to the number of inputs, which is much larger
 * than the number of unique elements, we optimize for initialization and
 * enumeration whose time is proportional to the capacity.
 * For initialization and enumeration, structure of array (SoA) is better
 * for vectorization, cache line utilization, and so on.
 */
typedef struct
{
   HYPRE_Int volatile segmentMask;   /* (#segments - 1); index via hash & segmentMask */
   HYPRE_Int volatile bucketMask;    /* (#buckets - 1); index via hash & bucketMask */
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile segments;
#endif
   HYPRE_Int *volatile key;          /* SoA: keys, parallel to hopInfo/hash */
   hypre_uint *volatile hopInfo;     /* per-bucket neighborhood occupancy bitmap */
   HYPRE_Int *volatile hash;         /* cached hash per slot */
} hypre_UnorderedIntSet;
/* Same layout as hypre_UnorderedIntSet but with HYPRE_BigInt keys/hashes. */
typedef struct
{
   HYPRE_Int volatile segmentMask;
   HYPRE_Int volatile bucketMask;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile segments;
#endif
   HYPRE_BigInt *volatile key;
   hypre_uint *volatile hopInfo;
   HYPRE_BigInt *volatile hash;
} hypre_UnorderedBigIntSet;
/* One AoS bucket of the int->int map: hop-info bitmap, cached hash, key,
 * and the mapped value (data). */
typedef struct
{
   hypre_uint volatile hopInfo;
   HYPRE_Int volatile hash;
   HYPRE_Int volatile key;
   HYPRE_Int volatile data;
} hypre_HopscotchBucket;
/* Bucket variant with HYPRE_BigInt hash/key. */
typedef struct
{
   hypre_uint volatile hopInfo;
   HYPRE_BigInt volatile hash;
   HYPRE_BigInt volatile key;
   HYPRE_Int volatile data;
} hypre_BigHopscotchBucket;
/**
 * The current typical use case of unordered map is putting input sequence
 * with no duplication (inverse map of a bijective mapping) followed by
 * lots of lookups.
 * For lookup, array of structure (AoS) gives better cache line utilization.
 */
typedef struct
{
   HYPRE_Int volatile segmentMask;
   HYPRE_Int volatile bucketMask;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile segments;
#endif
   hypre_HopscotchBucket* volatile table;
} hypre_UnorderedIntMap;
/* Map variant keyed by HYPRE_BigInt. */
typedef struct
{
   HYPRE_Int volatile segmentMask;
   HYPRE_Int volatile bucketMask;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile segments;
#endif
   hypre_BigHopscotchBucket* volatile table;
} hypre_UnorderedBigIntMap;
/* merge_sort.c */
/**
* Why merge sort?
* 1) Merge sort can take advantage of eliminating duplicates.
* 2) Merge sort is more efficiently parallelizable than qsort
*/
HYPRE_Int hypre_MergeOrderedArrays( HYPRE_Int size1, HYPRE_Int *array1, HYPRE_Int size2,
HYPRE_Int *array2, HYPRE_Int *size3_ptr, HYPRE_Int **array3_ptr);
void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3,
HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2);
void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **sorted);
void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len,
HYPRE_BigInt **sorted);
void hypre_sort_and_create_inverse_map(HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out,
hypre_UnorderedIntMap *inverse_map);
void hypre_big_sort_and_create_inverse_map(HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out,
hypre_UnorderedBigIntMap *inverse_map);
#if defined(HYPRE_USING_GPU)
HYPRE_Int hypre_SyncCudaComputeStream(hypre_Handle *hypre_handle);
HYPRE_Int hypre_SyncCudaDevice(hypre_Handle *hypre_handle);
HYPRE_Int hypre_ResetCudaDevice(hypre_Handle *hypre_handle);
HYPRE_Int hypreDevice_DiagScaleVector(HYPRE_Int n, HYPRE_Int *A_i, HYPRE_Complex *A_data,
HYPRE_Complex *x, HYPRE_Complex beta, HYPRE_Complex *y);
HYPRE_Int hypreDevice_DiagScaleVector2(HYPRE_Int n, HYPRE_Int *A_i, HYPRE_Complex *A_data,
HYPRE_Complex *x, HYPRE_Complex beta, HYPRE_Complex *y, HYPRE_Complex *z);
HYPRE_Int hypreDevice_IVAXPY(HYPRE_Int n, HYPRE_Complex *a, HYPRE_Complex *x, HYPRE_Complex *y);
HYPRE_Int hypreDevice_IVAXPYMarked(HYPRE_Int n, HYPRE_Complex *a, HYPRE_Complex *x,
HYPRE_Complex *y, HYPRE_Int *marker, HYPRE_Int marker_val);
HYPRE_Int hypreDevice_BigIntFilln(HYPRE_BigInt *d_x, size_t n, HYPRE_BigInt v);
HYPRE_Int hypreDevice_Filln(HYPRE_Complex *d_x, size_t n, HYPRE_Complex v);
HYPRE_Int hypreDevice_Scalen(HYPRE_Complex *d_x, size_t n, HYPRE_Complex v);
#endif
HYPRE_Int hypre_CurandUniform( HYPRE_Int n, HYPRE_Real *urand, HYPRE_Int set_seed,
hypre_ulonglongint seed, HYPRE_Int set_offset, hypre_ulonglongint offset);
HYPRE_Int hypre_CurandUniformSingle( HYPRE_Int n, float *urand, HYPRE_Int set_seed,
hypre_ulonglongint seed, HYPRE_Int set_offset, hypre_ulonglongint offset);
HYPRE_Int hypre_bind_device(HYPRE_Int myid, HYPRE_Int nproc, MPI_Comm comm);
/* nvtx.c */
void hypre_GpuProfilingPushRangeColor(const char *name, HYPRE_Int cid);
void hypre_GpuProfilingPushRange(const char *name);
void hypre_GpuProfilingPopRange();
/* utilities.c */
HYPRE_Int hypre_multmod(HYPRE_Int a, HYPRE_Int b, HYPRE_Int mod);
void hypre_partition1D(HYPRE_Int n, HYPRE_Int p, HYPRE_Int j, HYPRE_Int *s, HYPRE_Int *e);
char *hypre_strcpy(char *destination, const char *source);
HYPRE_Int hypre_SetSyncCudaCompute(HYPRE_Int action);
HYPRE_Int hypre_RestoreSyncCudaCompute();
HYPRE_Int hypre_GetSyncCudaCompute(HYPRE_Int *cuda_compute_stream_sync_ptr);
HYPRE_Int hypre_SyncCudaComputeStream(hypre_Handle *hypre_handle);
/* handle.c */
HYPRE_Int hypre_SetSpGemmUseCusparse( HYPRE_Int use_cusparse );
HYPRE_Int hypre_SetSpGemmAlgorithm( HYPRE_Int value );
HYPRE_Int hypre_SetSpGemmRownnzEstimateMethod( HYPRE_Int value );
HYPRE_Int hypre_SetSpGemmRownnzEstimateNSamples( HYPRE_Int value );
HYPRE_Int hypre_SetSpGemmRownnzEstimateMultFactor( HYPRE_Real value );
HYPRE_Int hypre_SetSpGemmHashType( char value );
HYPRE_Int hypre_SetUseGpuRand( HYPRE_Int use_gpurand );
HYPRE_Int hypre_SetGaussSeidelMethod( HYPRE_Int gs_method );
HYPRE_Int hypre_SetUserDeviceMalloc(GPUMallocFunc func);
HYPRE_Int hypre_SetUserDeviceMfree(GPUMfreeFunc func);
/* int_array.c */
hypre_IntArray* hypre_IntArrayCreate( HYPRE_Int size );
HYPRE_Int hypre_IntArrayDestroy( hypre_IntArray *array );
HYPRE_Int hypre_IntArrayInitialize_v2( hypre_IntArray *array,
HYPRE_MemoryLocation memory_location );
HYPRE_Int hypre_IntArrayInitialize( hypre_IntArray *array );
HYPRE_Int hypre_IntArrayCopy( hypre_IntArray *x, hypre_IntArray *y );
hypre_IntArray* hypre_IntArrayCloneDeep_v2( hypre_IntArray *x,
HYPRE_MemoryLocation memory_location );
hypre_IntArray* hypre_IntArrayCloneDeep( hypre_IntArray *x );
HYPRE_Int hypre_IntArraySetConstantValues( hypre_IntArray *v, HYPRE_Int value );
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/**
* Hopscotch hash is modified from the code downloaded from
* https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing
* with the following terms of usage
*/
////////////////////////////////////////////////////////////////////////////////
//TERMS OF USAGE
//------------------------------------------------------------------------------
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose is hereby granted without fee,
// provided that due acknowledgments to the authors are provided and
// this permission notice appears in all copies of the software.
// The software is provided "as is". There is no warranty of any kind.
//
//Authors:
// Maurice Herlihy
// Brown University
// and
// Nir Shavit
// Tel-Aviv University
// and
// Moran Tzafrir
// Tel-Aviv University
//
// Date: July 15, 2008.
//
////////////////////////////////////////////////////////////////////////////////
// Programmer : Moran Tzafrir (MoranTza@gmail.com)
// Modified : Jongsoo Park (jongsoo.park@intel.com)
// Oct 1, 2015.
//
////////////////////////////////////////////////////////////////////////////////
#ifndef hypre_HOPSCOTCH_HASH_HEADER
#define hypre_HOPSCOTCH_HASH_HEADER
//#include <strings.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <math.h>
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#include "_hypre_utilities.h"
// Potentially architecture specific features used here:
// __sync_val_compare_and_swap
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* This next section of code is here instead of in _hypre_utilities.h to get
* around some portability issues with Visual Studio. By putting it here, we
* can explicitly include this '.h' file in a few files in hypre and compile
* them with C++ instead of C (VS does not support C99 'inline').
******************************************************************************/
#ifdef HYPRE_USING_ATOMIC
/*
 * Atomic compare-and-swap: if *ptr == oldval, store newval into *ptr.
 * Returns the value of *ptr observed before the operation (equal to
 * oldval exactly when the swap happened).  Implemented with the GCC
 * __sync builtin; HYPRE_USING_ATOMIC is only defined when GCC > 4.1.0
 * (see the detection block above), so the #if below always selects the
 * builtin here.  MSVC/C11 alternatives are kept as commented-out notes.
 */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   return __sync_val_compare_and_swap(ptr, oldval, newval);
   //#elif defined _MSC_VER
   //return _InterlockedCompareExchange((long *)ptr, newval, oldval);
   //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
   // JSP: not many compilers have implemented this, so comment out for now
   //_Atomic HYPRE_Int *atomic_ptr = ptr;
   //atomic_compare_exchange_strong(atomic_ptr, &oldval, newval);
   //return oldval;
#endif
}
/*
 * Atomic fetch-and-add: adds value to *ptr and returns the value of
 * *ptr before the addition.  Uses the GCC __sync builtin; as with
 * hypre_compare_and_swap above, the HYPRE_USING_ATOMIC guard ensures
 * the GCC branch is taken.  MSVC/C11 alternatives remain commented out.
 */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   return __sync_fetch_and_add(ptr, value);
   //#elif defined _MSC_VER
   //return _InterlockedExchangeAdd((long *)ptr, value);
   //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
   // JSP: not many compilers have implemented this, so comment out for now
   //_Atomic HYPRE_Int *atomic_ptr = ptr;
   //return atomic_fetch_add(atomic_ptr, value);
#endif
}
#else // !HYPRE_USING_ATOMIC
/*
 * Serial (non-atomic) fallback of compare-and-swap: if *ptr == oldval,
 * store newval.  Mirrors the atomic builtin's contract by returning the
 * value of *ptr observed before the operation.
 */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
   HYPRE_Int observed = *ptr;
   if (observed == oldval)
   {
      *ptr = newval;
   }
   /* observed == oldval on success, so this covers both outcomes */
   return observed;
}
/*
 * Serial (non-atomic) fallback of fetch-and-add: bump *ptr by value and
 * return the pre-increment value, matching the atomic builtin's contract.
 */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
   const HYPRE_Int previous = *ptr;
   *ptr = previous + value;
   return previous;
}
#endif // !HYPRE_USING_ATOMIC
/******************************************************************************/
// Constants ................................................................
#define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32)
#define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024)
#define HYPRE_HOPSCOTCH_HASH_EMPTY (0)
#define HYPRE_HOPSCOTCH_HASH_BUSY (1)
// Small Utilities ..........................................................
/*
 * Returns the 0-based index of the least-significant set bit of x,
 * or -1 when x == 0 (both branches yield pos == 0 in that case, and
 * the function returns pos - 1).  On MSVC/MinGW64, where POSIX ffs()
 * is unavailable, the bit is found by a manual shift loop.
 */
static inline HYPRE_Int
first_lsb_bit_indx( hypre_uint x )
{
   HYPRE_Int pos;
#if defined(_MSC_VER) || defined(__MINGW64__)
   if (x == 0)
   {
      pos = 0;
   }
   else
   {
      /* 1-based scan, mirroring ffs() semantics */
      for (pos = 1; !(x & 1); ++pos)
      {
         x >>= 1;
      }
   }
#else
   pos = ffs(x);  /* ffs() returns 0 for x == 0, 1-based index otherwise */
#endif
   return (pos - 1);
}
/**
* hypre_Hash is adapted from xxHash with the following license.
*/
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/***************************************
* Constants
***************************************/
#define HYPRE_XXH_PRIME32_1 2654435761U
#define HYPRE_XXH_PRIME32_2 2246822519U
#define HYPRE_XXH_PRIME32_3 3266489917U
#define HYPRE_XXH_PRIME32_4 668265263U
#define HYPRE_XXH_PRIME32_5 374761393U
#define HYPRE_XXH_PRIME64_1 11400714785074694791ULL
#define HYPRE_XXH_PRIME64_2 14029467366897019727ULL
#define HYPRE_XXH_PRIME64_3 1609587929392839161ULL
#define HYPRE_XXH_PRIME64_4 9650029242287828579ULL
#define HYPRE_XXH_PRIME64_5 2870177450012600261ULL
#define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#if defined(HYPRE_MIXEDINT) || defined(HYPRE_BIGINT)
/*
 * 64-bit hash of a single HYPRE_BigInt, adapted from a one-round xxHash64
 * (see license text above).  Must never return
 * HYPRE_HOPSCOTCH_HASH_EMPTY (0), since that value marks empty slots in
 * the hopscotch tables; the debug branch below traps that case.
 */
static inline HYPRE_BigInt
hypre_BigHash( HYPRE_BigInt input )
{
   hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);
   /* single xxHash64 round over the one 8-byte "lane" */
   hypre_ulongint k1 = input;
   k1 *= HYPRE_XXH_PRIME64_2;
   k1 = HYPRE_XXH_rotl64(k1, 31);
   k1 *= HYPRE_XXH_PRIME64_1;
   h64 ^= k1;
   h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;
   /* final avalanche */
   h64 ^= h64 >> 33;
   h64 *= HYPRE_XXH_PRIME64_2;
   h64 ^= h64 >> 29;
   h64 *= HYPRE_XXH_PRIME64_3;
   h64 ^= h64 >> 32;
#ifndef NDEBUG
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64)
   {
      hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
   }
#endif
   return h64;
}
#else
/*
 * 32-bit hash of a single HYPRE_Int, adapted from xxHash32 (see license
 * text above).  The input is offset by 1665863975 so that only the key
 * -1073741824 can hash to HYPRE_HOPSCOTCH_HASH_EMPTY (0); non-negative
 * keys are therefore always safe.
 */
static inline HYPRE_Int
hypre_BigHash(HYPRE_Int input)
{
   hypre_uint state = HYPRE_XXH_PRIME32_5 + sizeof(input);
   /* fold the (offset) input into the accumulator */
   state += (input + 1665863975) * HYPRE_XXH_PRIME32_3;
   state = HYPRE_XXH_rotl32(state, 17) * HYPRE_XXH_PRIME32_4;
   /* final avalanche */
   state ^= state >> 15;
   state *= HYPRE_XXH_PRIME32_2;
   state ^= state >> 13;
   state *= HYPRE_XXH_PRIME32_3;
   state ^= state >> 16;
   return state;
}
#endif
#ifdef HYPRE_BIGINT
/*
 * 64-bit hash of a single HYPRE_Int under HYPRE_BIGINT (HYPRE_Int is
 * 64 bits there), adapted from a one-round xxHash64.  Same structure as
 * hypre_BigHash above; must never return HYPRE_HOPSCOTCH_HASH_EMPTY (0),
 * which marks empty table slots — the debug branch traps that case.
 */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
   hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);
   /* single xxHash64 round over the one 8-byte "lane" */
   hypre_ulongint k1 = input;
   k1 *= HYPRE_XXH_PRIME64_2;
   k1 = HYPRE_XXH_rotl64(k1, 31);
   k1 *= HYPRE_XXH_PRIME64_1;
   h64 ^= k1;
   h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;
   /* final avalanche */
   h64 ^= h64 >> 33;
   h64 *= HYPRE_XXH_PRIME64_2;
   h64 ^= h64 >> 29;
   h64 *= HYPRE_XXH_PRIME64_3;
   h64 ^= h64 >> 32;
#ifndef NDEBUG
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64)
   {
      hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
   }
#endif
   return h64;
}
#else
/*
 * 32-bit hash of a single HYPRE_Int, adapted from xxHash32 (see license
 * text above).  The input is offset by 1665863975 so that only the key
 * -1073741824 can hash to HYPRE_HOPSCOTCH_HASH_EMPTY (0); non-negative
 * keys are therefore always safe.
 */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
   hypre_uint state = HYPRE_XXH_PRIME32_5 + sizeof(input);
   /* fold the (offset) input into the accumulator */
   state += (input + 1665863975) * HYPRE_XXH_PRIME32_3;
   state = HYPRE_XXH_rotl32(state, 17) * HYPRE_XXH_PRIME32_4;
   /* final avalanche */
   state ^= state >> 15;
   state *= HYPRE_XXH_PRIME32_2;
   state ^= state >> 13;
   state *= HYPRE_XXH_PRIME32_3;
   state ^= state >> 16;
   return state;
}
#endif
/*
 * Hopscotch displacement for hypre_UnorderedIntSet: scan the
 * HOP_RANGE-1 buckets preceding *free_bucket for an entry that can be
 * relocated into *free_bucket, thereby moving the free slot closer to
 * its home bucket.  On success, *free_bucket and *free_dist are updated
 * and the function returns; if nothing can be moved, *free_bucket is
 * set to -1 and *free_dist to 0 (caller must handle the failure).
 * Under HYPRE_CONCURRENT_HOPSCOTCH the segment owning the source bucket
 * is locked (unless it is start_seg, already held by the caller) and its
 * timestamp is bumped so lock-free readers can detect the move.
 */
static inline void
hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           HYPRE_Int *free_bucket,
                                           HYPRE_Int *free_dist )
{
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* find the occupied slot closest to move_bucket that would still
       * land before the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that nobody changed this bucket */
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket] = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* publish the move in the hop-info bitmap */
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = -1;
   *free_dist = 0;
}
/*
 * Hopscotch displacement for hypre_UnorderedBigIntSet; identical logic
 * to hypre_UnorderedIntSetFindCloserFreeBucket with HYPRE_BigInt
 * key/hash arrays.  On failure *free_bucket is set to -1 and *free_dist
 * to 0.
 */
static inline void
hypre_UnorderedBigIntSetFindCloserFreeBucket( hypre_UnorderedBigIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              HYPRE_Int *free_bucket,
                                              HYPRE_Int *free_dist )
{
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* find the occupied slot closest to move_bucket that would still
       * land before the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that nobody changed this bucket */
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket] = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* publish the move in the hop-info bitmap */
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = -1;
   *free_dist = 0;
}
/*
 * Hopscotch displacement for hypre_UnorderedIntMap.  Same algorithm as
 * the set variants above, but operates on AoS buckets (pointers into
 * m->table) and also moves the bucket's data field.  On failure
 * *free_bucket is set to NULL and *free_dist to 0.
 */
static inline void
hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           hypre_HopscotchBucket **free_bucket,
                                           HYPRE_Int *free_dist)
{
   hypre_HopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* find the occupied slot closest to move_bucket that would still
       * land before the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* segment index derives from the bucket's offset in the table */
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that nobody changed this bucket */
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* publish the move in the hop-info bitmap */
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = NULL;
   *free_dist = 0;
}
/*
 * Hopscotch displacement for hypre_UnorderedBigIntMap; identical logic
 * to hypre_UnorderedIntMapFindCloserFreeBucket but with
 * hypre_BigHopscotchBucket entries.  On failure *free_bucket is set to
 * NULL and *free_dist to 0.
 */
static inline void
hypre_UnorderedBigIntMapFindCloserFreeBucket( hypre_UnorderedBigIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              hypre_BigHopscotchBucket **free_bucket,
                                              HYPRE_Int *free_dist)
{
   hypre_BigHopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* find the occupied slot closest to move_bucket that would still
       * land before the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* segment index derives from the bucket's offset in the table */
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that nobody changed this bucket */
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_BigHopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* publish the move in the hop-info bitmap */
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = NULL;
   *free_dist = 0;
}
/* Construction and destruction of the hopscotch sets/maps (defined in
 * the corresponding .c file, not in this header).  inCapacity is the
 * initial capacity; concurrencyLevel presumably sizes the lock-segment
 * array used under HYPRE_CONCURRENT_HOPSCOTCH — verify in the
 * implementation. */
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
                                  HYPRE_Int inCapacity,
                                  HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s,
                                     HYPRE_Int inCapacity,
                                     HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
                                  HYPRE_Int inCapacity,
                                  HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m,
                                     HYPRE_Int inCapacity,
                                     HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s );
void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s );
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m );
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m );
// Query Operations .........................................................
/*
 * Returns 1 if key is present in the set, 0 otherwise.
 * Lock-free lookup: probe the home bucket's hop-info bitmap with
 * first_lsb_bit_indx.  Under HYPRE_CONCURRENT_HOPSCOTCH, if the bitmap
 * walk misses but the segment timestamp changed during the walk (a
 * concurrent displacement may have moved the key), fall back to a full
 * linear scan of the HOP_RANGE neighborhood.
 */
static inline HYPRE_Int
hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s,
                               HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
#endif
   HYPRE_Int bucket = hash & s->bucketMask;
   hypre_uint hopInfo = s->hopInfo[bucket];
   if (0 == hopInfo)
   {
      /* neighborhood empty */
      return 0;
   }
   else if (1 == hopInfo )
   {
      /* fast path: only the home slot is occupied */
      if (hash == s->hash[bucket] && key == s->key[bucket])
      {
         return 1;
      }
      else { return 0; }
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   /* walk the set bits of the neighborhood bitmap */
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         return 1;
      }
      hopInfo &= ~(1U << i);
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   if (segment->timestamp == startTimestamp)
   {
      /* no concurrent displacement happened: definitive miss */
      return 0;
   }
#endif
   /* timestamp changed (or no atomics): rescan the whole hop range */
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
      {
         return 1;
      }
   }
   return 0;
}
/* Return 1 if key is present in set s, 0 otherwise.
   Big-integer variant of hypre_UnorderedIntSetContains: the hash/key are
   HYPRE_BigInt and mask results are narrowed to HYPRE_Int for indexing. */
static inline HYPRE_Int
hypre_UnorderedBigIntSetContains( hypre_UnorderedBigIntSet *s,
                                  HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)];
#endif
   HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);
   hypre_uint hopInfo = s->hopInfo[bucket];
   if (0 == hopInfo)
   {
      // no entry hashes to this home bucket
      return 0;
   }
   else if (1 == hopInfo )
   {
      // exactly one candidate, stored in the home bucket itself
      if (hash == s->hash[bucket] && key == s->key[bucket])
      {
         return 1;
      }
      else { return 0; }
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   // snapshot the segment timestamp to detect concurrent displacements
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      // visit each neighbor slot flagged in the hop-info bitmap
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         return 1;
      }
      hopInfo &= ~(1U << i);
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   // nothing moved while we scanned, so the miss is definitive
   if (segment->timestamp == startTimestamp)
   {
      return 0;
   }
#endif
   // a displacement may have raced with the bitmap scan above;
   // fall back to a linear sweep of the whole hop range
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
      {
         return 1;
      }
   }
   return 0;
}
/**
 * Look up key in map m.
 *
 * @return the data stored for key, or -1 if key doesn't exist
 *         (callers therefore cannot distinguish a stored value of -1
 *         from a missing key).
 */
static inline HYPRE_Int
hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m,
                          HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
#endif
   hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]);
   hypre_uint hopInfo = elmAry->hopInfo;
   if (0 == hopInfo)
   {
      // no entry hashes to this home bucket
      return -1;
   }
   else if (1 == hopInfo )
   {
      // exactly one candidate, stored in the home bucket itself
      if (hash == elmAry->hash && key == elmAry->key)
      {
         return elmAry->data;
      }
      else { return -1; }
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   // snapshot the segment timestamp to detect concurrent displacements
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      // visit each neighbor bucket flagged in the hop-info bitmap
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_HopscotchBucket* currElm = elmAry + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         return currElm->data;
      }
      hopInfo &= ~(1U << i);
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   // nothing moved while we scanned, so the miss is definitive
   if (segment->timestamp == startTimestamp)
   {
      return -1;
   }
#endif
   // a displacement may have raced with the bitmap scan above;
   // fall back to a linear sweep of the whole hop range
   hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
      {
         return currBucket->data;
      }
   }
   return -1;
}
/**
 * Look up key in map m (big-integer variant).
 *
 * @return the data stored for key, or -1 if key doesn't exist.
 *
 * Fix: the fallback-scan base pointer now narrows the masked hash with
 * (HYPRE_Int), matching the elmAry computation above (and the equivalent
 * code in hypre_UnorderedBigIntSetContains); previously this one index
 * expression was left as a HYPRE_BigInt, inconsistently with its siblings.
 */
static inline
HYPRE_Int hypre_UnorderedBigIntMapGet( hypre_UnorderedBigIntMap *m,
                                       HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)];
#endif
   hypre_BigHopscotchBucket *elmAry = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
   hypre_uint hopInfo = elmAry->hopInfo;
   if (0 == hopInfo)
   {
      // no entry hashes to this home bucket
      return -1;
   }
   else if (1 == hopInfo )
   {
      // exactly one candidate, stored in the home bucket itself
      if (hash == elmAry->hash && key == elmAry->key)
      {
         return elmAry->data;
      }
      else { return -1; }
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   // snapshot the segment timestamp to detect concurrent displacements
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      // visit each neighbor bucket flagged in the hop-info bitmap
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_BigHopscotchBucket* currElm = elmAry + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         return currElm->data;
      }
      hopInfo &= ~(1U << i);
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   // nothing moved while we scanned, so the miss is definitive
   if (segment->timestamp == startTimestamp)
   {
      return -1;
   }
#endif
   // a displacement may have raced with the bitmap scan above;
   // fall back to a linear sweep of the whole hop range
   hypre_BigHopscotchBucket *currBucket = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
   HYPRE_Int i;
   for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
      {
         return currBucket->data;
      }
   }
   return -1;
}
//status Operations .........................................................
/* Count the occupied slots of set s by sweeping every bucket,
   including the insert-range overflow area past bucketMask. */
static inline
HYPRE_Int hypre_UnorderedIntSetSize( hypre_UnorderedIntSet *s )
{
   HYPRE_Int num_buckets = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int size = 0;
   HYPRE_Int b;
   for (b = 0; b < num_buckets; b++)
   {
      // a non-empty hash marks an occupied slot
      size += (s->hash[b] != HYPRE_HOPSCOTCH_HASH_EMPTY);
   }
   return size;
}
/* Count the occupied slots of set s by sweeping every bucket,
   including the insert-range overflow area past bucketMask.
   Fix: the loop counter is now HYPRE_BigInt to match the bound n
   (s->bucketMask is HYPRE_BigInt here); with a HYPRE_Int counter the
   comparison promotes i, and i itself could overflow (undefined
   behavior) before reaching a bound larger than HYPRE_Int can hold. */
static inline
HYPRE_Int hypre_UnorderedBigIntSetSize( hypre_UnorderedBigIntSet *s )
{
   HYPRE_Int counter = 0;
   HYPRE_BigInt n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_BigInt i;
   for (i = 0; i < n; ++i)
   {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i])
      {
         ++counter;
      }
   }
   return counter;
}
/* Count the occupied buckets of map m, including the insert-range
   overflow area past bucketMask. */
static inline HYPRE_Int
hypre_UnorderedIntMapSize( hypre_UnorderedIntMap *m )
{
   HYPRE_Int total = 0;
   HYPRE_Int last = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int idx = 0;
   while (idx < last)
   {
      // a non-empty hash marks an occupied bucket
      if (m->table[idx].hash != HYPRE_HOPSCOTCH_HASH_EMPTY)
      {
         total++;
      }
      ++idx;
   }
   return total;
}
/* Count the occupied buckets of big-int map m, including the
   insert-range overflow area past bucketMask.  Iteration order is
   irrelevant to the count, so we sweep downward. */
static inline HYPRE_Int
hypre_UnorderedBigIntMapSize( hypre_UnorderedBigIntMap *m )
{
   HYPRE_Int nbuckets = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int occupied = 0;
   HYPRE_Int j;
   for (j = nbuckets - 1; j >= 0; --j)
   {
      // a non-empty hash marks an occupied bucket
      if (m->table[j].hash != HYPRE_HOPSCOTCH_HASH_EMPTY)
      {
         occupied++;
      }
   }
   return occupied;
}
/* Export a set's contents as a newly allocated array; *len receives the
   element count.  Implementations live elsewhere in the library. */
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len );
HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len );
//modification Operations ...................................................
/* Insert key into set s (no-op if already present).
   In concurrent builds the segment lock serializes writers to this
   segment; the empty->busy claim of a free bucket additionally uses a
   compare-and-swap so the probe can race with writers in other segments. */
static inline void
hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s,
                          HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   HYPRE_Int bucket = hash & s->bucketMask;
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = s->hopInfo[bucket];
   while (0 != hopInfo)
   {
      // scan the neighbors flagged in the hop-info bitmap
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         // key already present; nothing to do
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   HYPRE_Int free_bucket = bucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      // cheap read first, then CAS-claim the slot (EMPTY -> BUSY)
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            // free slot is within hop range: publish key, hash, and bitmap bit
            s->key[free_bucket] = key;
            s->hash[free_bucket] = hash;
            s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return;
         }
         // free slot too far away: hopscotch-displace an earlier entry
         // into it to pull the free slot closer to the home bucket
         hypre_UnorderedIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                   segment,
#endif
                                                   &free_bucket, &free_dist);
      }
      while (-1 != free_bucket);
   }
   //NEED TO RESIZE ..........................
   // table resizing is unimplemented: report and abort the program
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return;
}
/* Insert key into big-int set s (no-op if already present).
   Mirrors hypre_UnorderedIntSetPut; see that function for the locking
   and CAS-claim protocol.
   NOTE(review): the CAS below operates through an (HYPRE_Int *) cast
   even though s->hash holds HYPRE_BigInt in big-int builds — confirm
   hypre_compare_and_swap's operand width matches the field here. */
static inline void
hypre_UnorderedBigIntSetPut( hypre_UnorderedBigIntSet *s,
                             HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = s->hopInfo[bucket];
   while (0 != hopInfo)
   {
      // scan the neighbors flagged in the hop-info bitmap
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
      {
         // key already present; nothing to do
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   HYPRE_Int free_bucket = bucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      // cheap read first, then CAS-claim the slot (EMPTY -> BUSY)
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            // free slot is within hop range: publish key, hash, and bitmap bit
            s->key[free_bucket] = key;
            s->hash[free_bucket] = hash;
            s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return;
         }
         // free slot too far away: hopscotch-displace an earlier entry
         // into it to pull the free slot closer to the home bucket
         hypre_UnorderedBigIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                      segment,
#endif
                                                      &free_bucket, &free_dist);
      }
      while (-1 != free_bucket);
   }
   //NEED TO RESIZE ..........................
   // table resizing is unimplemented: report and abort the program
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return;
}
/* Insert (key, data) into map m unless key is already present.
   Returns the existing data when key was already mapped, or
   HYPRE_HOPSCOTCH_HASH_EMPTY when the new pair was inserted.
   Locking/CAS protocol matches hypre_UnorderedIntSetPut. */
static inline HYPRE_Int
hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m,
                                  HYPRE_Int key, HYPRE_Int data )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = startBucket->hopInfo;
   while (0 != hopInfo)
   {
      // scan the neighbors flagged in the hop-info bitmap
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_HopscotchBucket* currElm = startBucket + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         // key already mapped: return its existing data unchanged
         HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return rc;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   hypre_HopscotchBucket* free_bucket = startBucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      // cheap read first, then CAS-claim the slot (EMPTY -> BUSY)
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            // free slot is within hop range: publish data, key, hash, bitmap bit
            free_bucket->data = data;
            free_bucket->key = key;
            free_bucket->hash = hash;
            startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return HYPRE_HOPSCOTCH_HASH_EMPTY;
         }
         // free slot too far away: hopscotch-displace an earlier entry
         // into it to pull the free slot closer to the home bucket
         hypre_UnorderedIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                   segment,
#endif
                                                   &free_bucket, &free_dist);
      }
      while (NULL != free_bucket);
   }
   //NEED TO RESIZE ..........................
   // table resizing is unimplemented: report and abort the program
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
/* Insert (key, data) into big-int map m unless key is already present.
   Returns the existing data when key was already mapped, or
   HYPRE_HOPSCOTCH_HASH_EMPTY when the new pair was inserted.
   NOTE(review): the CAS below casts &free_bucket->hash to (HYPRE_Int *)
   even though the field holds HYPRE_BigInt in big-int builds — confirm
   hypre_compare_and_swap's operand width matches the field here. */
static inline HYPRE_Int
hypre_UnorderedBigIntMapPutIfAbsent( hypre_UnorderedBigIntMap *m,
                                     HYPRE_BigInt key, HYPRE_Int data)
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   hypre_BigHopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = startBucket->hopInfo;
   while (0 != hopInfo)
   {
      // scan the neighbors flagged in the hop-info bitmap
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_BigHopscotchBucket* currElm = startBucket + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         // key already mapped: return its existing data unchanged
         HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return rc;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   hypre_BigHopscotchBucket* free_bucket = startBucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      // cheap read first, then CAS-claim the slot (EMPTY -> BUSY)
      if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
           (HYPRE_HOPSCOTCH_HASH_EMPTY ==
            hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                   (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
      {
         break;
      }
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            // free slot is within hop range: publish data, key, hash, bitmap bit
            free_bucket->data = data;
            free_bucket->key = key;
            free_bucket->hash = hash;
            startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return HYPRE_HOPSCOTCH_HASH_EMPTY;
         }
         // free slot too far away: hopscotch-displace an earlier entry
         // into it to pull the free slot closer to the home bucket
         hypre_UnorderedBigIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                      segment,
#endif
                                                      &free_bucket, &free_dist);
      }
      while (NULL != free_bucket);
   }
   //NEED TO RESIZE ..........................
   // table resizing is unimplemented: report and abort the program
   hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif // hypre_HOPSCOTCH_HASH_HEADER
#ifdef __cplusplus
}
#endif
#endif
|
visual-effects.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag  "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the accelerated (OpenCL) path; fall through on NULL. */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* key == ~0UL feeds the thread-count decision; presumably it forces a
     reproducible (serial) path when no secret key is set -- TODO confirm
     against magick_number_threads. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's RandomInfo */

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            /* channel not selected for the operation: copy unchanged */
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* first pass: blend each channel toward the minimum of (R,G,B) */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /* second pass: blend the result toward the maximum of (R,G,B) */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Simulate a charcoal sketch: detect edges, clamp and blur them, then
  normalize, negate, and convert the result to grayscale.  Returns the
  new image, or NULL on failure (details in exception).
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edges,
    *sketch;

  MagickBooleanType
    ok;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Edge detection comes first; everything else refines its output. */
  edges=EdgeImage(image,radius,exception);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  sketch=(Image *) NULL;
  ok=ClampImage(edges,exception);
  if (ok != MagickFalse)
    sketch=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);  /* intermediate no longer needed */
  if (sketch == (Image *) NULL)
    return((Image *) NULL);
  /* Post-process the blurred edges into the final charcoal look. */
  ok=NormalizeImage(sketch,exception);
  if (ok != MagickFalse)
    ok=NegateImage(sketch,MagickFalse,exception);
  if (ok != MagickFalse)
    ok=GrayscaleImage(sketch,image->intensity,exception);
  if (ok == MagickFalse)
    sketch=DestroyImage(sketch);
  return(sketch);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"
/* Linear blend of a pixel toward the colorize value; blend_percentage is
   a percentage in [0,100]. */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* Promote gray images to sRGB so the fill color can be applied. */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);  /* no blend spec: return the plain clone */
  /*
    Parse the blend geometry: rho applies to all channels; sigma/xi/psi
    (and chi for CMYK) override green/blue/alpha (black) individually.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;  /* channel not selected for the operation */
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorizeImageTag,progress,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =  /* identity transform until user values are mapped in */
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      /*
        Kernel entries beyond the supported 6x6 window are ignored, but the
        source index still advances so in-range entries stay aligned.
      */
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /*
        Log the effective (clamped) 6x6 matrix, one row per log line.
      */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      /*
        Row v of the matrix produces output channel v (R,G,B,K,A).  Black is
        only summed for CMYK images, alpha only when the image has an alpha
        trait; column 5 is a constant offset scaled by QuantumRange.
      */
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "implode" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is implode. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  /*
    Stretch the shorter axis so distance is measured inside an ellipse that
    fills the image; radius follows the shorter half-dimension.
  */
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the effect radius: copy the source pixel unchanged. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);

          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel.  factor pulls the sample point toward (or,
            for negative amount, away from) the center; interpolation reads
            through a separate virtual view so writes to q never alias
            reads.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x/scale.x+center.x),
            (double) (factor*delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;  /* initialize before any path can write/read it */
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: pad the sequence with clones of the only frame.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  status=MagickTrue;
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        Blend weights: beta ramps toward 1 across the in-between frames;
        alpha is its complement (weight of the current frame).
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /* Fix: also release the partially-built list (was leaked). */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /*
        Resize the next frame to the in-between geometry so the two frames
        can be blended pixel-for-pixel.
      */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);

            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if ((morph_traits & CopyPixelTrait) != 0)
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Linear blend of the current in-between frame (alpha) with the
              resized next frame (beta).
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Perturb a pixel value by uniform random noise in [-noise/2, +noise/2) and
  clamp the result to the valid quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
  const double pixel,const double noise)
{
  MagickRealType
    perturbed;

  perturbed=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum(perturbed));
}
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
  const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  MagickStatusType
    status;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* A degenerate (zero-area) segment needs no further work. */
  if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) < MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /* top-left quadrant */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* bottom-left quadrant */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* top-right quadrant */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* bottom-right quadrant */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status == 0 ? MagickFalse : MagickTrue);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  status=MagickTrue;
  /* Noise amplitude shrinks as attenuate grows with recursion depth. */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
      (fabs(segment->x2-x_mid) >= MagickEpsilon))
    {
      /*
        Left pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        /* NOTE(review): the right-pixel branch returns MagickFalse on the
           same failure; confirm this asymmetry is intentional. */
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);

        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickFalse);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);

            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
      (fabs(segment->y2-y_mid) >= MagickEpsilon))
    {
      if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
          (fabs(segment->y2-y_mid) >= MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);

            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);

            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) >= MagickEpsilon))
    {
      /*
        Middle pixel: average the segment's two diagonal corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);

        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Segments under 3 pixels on both axes terminate the subdivision. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(status == 0 ? MagickFalse : MagickTrue);
  return(MagickFalse);
}
/*
  Render plasma fractal values over the whole image via PlasmaImageProxy(),
  using one shared random source and one authentic plus two virtual views.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Validate arguments first; the debug trace was duplicated here (and the
    first copy ran before the signature assert) -- log it exactly once.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
%      const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Border width: 1/25th of the longest side, never less than 10 pixels. */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /* Word-wrap the caption and measure the line count it needs. */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              /* Reserve room for the caption below the picture. */
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90 degrees, wave, then rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a shadow behind the bent picture, then flop it back upright.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* returns NULL: DestroyImage() yields NULL */
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* returns NULL: DestroyImage() yields NULL */
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SepiaToneImage() applies a sepia-tone special effect: pixels brighter than
  the threshold saturate to QuantumRange while darker pixels are shifted
  toward a brown hue; the result is then normalized and contrast-enhanced.

    image: the source image (not modified).
    threshold: tone threshold, 0..QuantumRange (~80% is a reasonable start).
    exception: receives any errors or warnings.

  Returns a new image, or NULL on failure.
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      /*
        Red saturates at the threshold, green at 7/6 of it, and blue is
        darkened by threshold/6: the staggered channels yield the brown cast.
      */
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      tone=threshold/7.0;
      /*
        Clamp very dark green/blue up to a floor.  Read q through
        sepia_image's channel map: q addresses sepia_image pixel data, so the
        previous use of the source image's map was only correct because the
        clone shares an identical channel layout.
      */
      if ((double) GetPixelGreen(sepia_image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(sepia_image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Pad the clone on every side by ~2*sigma (rounded) with a transparent
    border so the blurred shadow edge is not clipped.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Flatten every pixel to the background color; its alpha is the pixel's
      own alpha scaled by the requested percentage, preserving the silhouette.
    */
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the silhouette; restore the
    original channel mask afterwards.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /*
    Shift the page geometry by the requested offset, compensating for the
    border padding, so the shadow composites at (x_offset,y_offset) relative
    to the original image.
  */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image.  Stage 1: fill a double-sized canvas with uniform gray
    noise (one RNG per OpenMP thread).
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): threading is gated on key == ~0UL — presumably to keep the
    noise reproducible when a random seed/secret is set; confirm intent.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      register ssize_t
        i;

      /*
        One random value per pixel, written to every defined channel so the
        noise is gray.  NOTE(review): traits are read from the source image,
        not random_image; the clone shares the same channel map, so this is
        equivalent here.
      */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this reports failure to the caller. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  /*
    Stage 2: streak the noise along the sketch angle, extract edges, then
    clamp/normalize/negate and halve the result to build the "dodge" layer.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  status=ClampImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NormalizeImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NegateImage(dodge_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  /*
    Stage 3: color-dodge the original with the pencil-stroke layer, then
    blend the original back (20/80 via compose:args) to restore some tone.
  */
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SolarizeImage() inverts, in place, every channel value that exceeds the
  threshold — emulating darkroom solarization.  Both the colormap (for
  PseudoClass images) and the pixel data are processed.

    image: the image, modified in place.
    threshold: extent of solarization, 0..QuantumRange.
    exception: receives any errors or warnings.

  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Validate the exception argument for consistency with the sibling effect
    functions in this file.
  */
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Invert only channels flagged for update, leaving e.g. read-only or
        undefined channels untouched.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SteganoImage() hides the watermark's intensity bits in the low-order bits
  of a copy of the image.  Bit i of each watermark pixel's intensity is
  written to bit-plane j of the cover image's red/green/blue channels in
  round-robin order, starting at pixel index image->offset.

    image: the cover image (not modified).
    watermark: the image to embed.
    exception: receives any errors or warnings.

  Returns a new image, or NULL on failure.
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  /*
    i walks the watermark intensity bits high-to-low; j is the destination
    bit-plane, advanced each time k wraps back to the starting offset.
  */
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        /*
          Cycle the target channel red -> green -> blue per embedded bit.
        */
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap k at the total pixel count (columns*rows); the previous
          columns*columns wrapped at the wrong point for non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
/*
  StereoImage() is a convenience wrapper for StereoAnaglyphImage() with a
  zero x/y offset between the left and right images.
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    /*
      Sample the left image shifted by (x_offset,y_offset); virtual pixels
      cover coordinates that fall outside the image.
      NOTE(review): a NULL pixel acquire breaks out without setting status,
      so a partially processed image is still returned — confirm intended.
    */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /*
        Anaglyph: red from the left eye, green and blue from the right;
        alpha (when present) is the average of the two.
      */
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  double
    radius;

  Image
    *canvas_image,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
    (void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
  /*
    Compute scaling factor.  The scale maps the image's bounding ellipse onto
    a circle of the given radius so the swirl is round on non-square images.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          /*
            Outside the swirl circle: copy the source pixel unchanged,
            channel by channel.
          */
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: the rotation angle decays quadratically with
            distance from the center (factor in [0,1]), then the source is
            sampled at the rotated coordinate with the requested
            interpolation method.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A geometry string of blend percentages (rho[,sigma[,xi[,psi]]])
%     applied per channel when tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* A NULL blend means no tinting: return the plain clone. */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  blend is parsed as a geometry string
    of percentages: rho applies to all channels, with sigma/xi/psi (and chi
    for CMYK) overriding green/blue/alpha (black) individually.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Build the per-channel color vector: blend% of the tint color, centered by
    subtracting the tint's intensity.
    NOTE(review): the NULL image argument presumably selects the default
    intensity method — confirm against GetPixelInfoIntensity().
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      /*
        Midtone weighting f(x)=1-4*(x-1/2)^2: zero at black and white,
        maximal at mid-gray, so the tint affects midtones most.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  /*
    VignetteImage(): soften the image edges in vignette style by compositing
    a blurred white-on-black ellipse into the clone's alpha channel and then
    flattening against the background.  Returns a new image, or NULL (with
    'exception' set) on failure; the caller owns the result.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /* the vignette mask is applied through the alpha channel */
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Build the mask: black canvas with a filled white ellipse, centered on the
    image; x and y shrink the ellipse radii from the half-dimensions.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /* feather the mask edge with a Gaussian blur */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /* copy the blurred mask intensity into the canvas alpha channel */
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  /* flatten against the background color to realize the vignette */
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o interpolate: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *canvas_image_view,
    *wave_view;

  float
    *sine_map;

  Image
    *canvas_image,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    WaveImage(): shift each pixel column vertically by a sine of its x
    coordinate (amplitude/wave_length in pixels), sampling the source with
    the given interpolation method.  The result is taller than the source by
    2*|amplitude| so the displaced pixels fit.  Returns a new image, or NULL
    on failure; the caller owns the result.

    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  /* materialize alpha so out-of-bounds samples blend against the background */
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map: per-column vertical displacement, biased by
    |amplitude| so all offsets are non-negative.
  */
  sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (float *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
      ((2.0*MagickPI*i)/wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /* samples that fall outside the source resolve to the background color */
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* sample the source at (x, y - sine_map[x]) with sub-pixel accuracy */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(float *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;

  register ssize_t
    i;

  /*
    One pass of the "hat" wavelet low-pass filter along a row or column:
    for each position i,
      kernel[i] = 0.25*(2*pixels[i] + pixels[i-scale] + pixels[i+scale]),
    with out-of-range neighbors mirrored about the edges.  'stride' selects
    the traversal direction (1 = along a row, image width = down a column);
    'extent' is the number of samples; 'kernel' receives the filtered output.
  */
  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  /* leading edge: the i-scale neighbor is mirrored to index scale-i */
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /* interior: both neighbors are in range */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /* trailing edge: the i+scale neighbor is mirrored about extent-1 */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  /* empirically-derived per-level noise magnitudes used to scale 'threshold' */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };

  /*
    WaveletDenoiseImage(): denoise via a 5-level à-trous wavelet decomposition
    (adapted from dcraw).  For each of the R, G, B channels the image is
    copied into a float plane, repeatedly low-pass filtered with HatTransform,
    the per-level detail (high-pass) coefficients are soft-thresholded, and
    the channel is reconstructed into the output image.

    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* use the OpenCL accelerated path when available */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Workspace layout: three image-sized float planes.  Plane 0 holds the
    running signal; planes 1 and 2 alternate as the low-pass output of each
    level (see low_pass below).  'kernel' is a per-thread scratch row/column
    for HatTransform.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    PixelChannel
      pixel_channel;

    PixelTrait
      traits;

    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    /* only the color channels are denoised; alpha et al. pass through */
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /* alternate between plane 1 and plane 2 as the low-pass destination */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        /* horizontal hat transform of one row, scale 2^level */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        /* vertical hat transform of one column, in place in the low-pass plane */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /* detail = previous level minus this level's low pass (soft shrink) */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /* accumulate the denoised detail back into plane 0 */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel: plane 0
      (accumulated details) plus the final low-pass residual.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        offset;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;

        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress is reported under AddNoiseImageTag, which
           looks like a copy-paste from AddNoiseImage — confirm intended. */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
c11-ping-pong.c | #if !defined(__llvm__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && \
defined(__GNUC__) && (__GNUC__ <= 6)
#error GCC will not compile this code because of "https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65467"
#else
/* Intel compiler does not support _Atomic hence defines __STDC_NO_ATOMICS__,
* but it supports the atomic_<integer> API. */
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) && \
(!defined(__STDC_NO_ATOMICS__) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1600)))
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdatomic.h>
#ifdef _OPENMP
# include <omp.h>
#else
# error No OpenMP support!
#endif
/* Memory orderings used by the ping-pong loops in main().  Declared with the
 * proper enum type (memory_order) instead of int, so the values handed to
 * atomic_{load,store}_explicit() are type-correct without relying on
 * implicit int -> enum conversion.  Build with -DSEQUENTIAL_CONSISTENCY to
 * benchmark seq_cst instead of acquire/release. */
#ifdef SEQUENTIAL_CONSISTENCY
memory_order load_model = memory_order_seq_cst;
memory_order store_model = memory_order_seq_cst;
#else
memory_order load_model = memory_order_acquire;   /* loads synchronize-with matching releases */
memory_order store_model = memory_order_release;
#endif
/* Two-thread ping-pong latency benchmark using C11 atomics under OpenMP.
 * The even thread ("left") publishes round i via left_ready; the odd thread
 * ("right") spins until it sees i, then answers via right_ready, which the
 * left thread spins on.  argv[1] optionally overrides the iteration count. */
int main(int argc, char * argv[])
{
    int nt = omp_get_max_threads();
#if 1
    /* exactly two threads are required, or the handshake below deadlocks */
    if (nt != 2) omp_set_num_threads(2);
#else
    if (nt < 2) omp_set_num_threads(2);
    if (nt % 2 != 0) omp_set_num_threads(nt-1);
#endif
    int iterations = (argc>1) ? atoi(argv[1]) : 1000000;
    /* atoi returns 0 on garbage; avoid a zero-trip loop and division by 0 */
    if (iterations < 1) iterations = 1000000;
    printf("thread ping-pong benchmark\n");
    printf("num threads = %d\n", omp_get_max_threads());
    printf("iterations = %d\n", iterations);
#ifdef SEQUENTIAL_CONSISTENCY
    printf("memory model = %s\n", "seq_cst");
#else
    printf("memory model = %s\n", "acq-rel");
#endif
    fflush(stdout);
    /* -1 means "no round published yet".  Plain initialization of an atomic
     * object is valid C11; ATOMIC_VAR_INIT is deprecated in C17 and removed
     * in C23, so it is not used here. */
    atomic_int left_ready = -1;
    atomic_int right_ready = -1;
    int left_payload = 0;
    int right_payload = 0;
    #pragma omp parallel
    {
        int me = omp_get_thread_num();
        /// 0=left 1=right
        bool parity = (me % 2 == 0);
        int junk = 0;   /* consume payloads so the loop cannot be elided */
        /// START TIME
        #pragma omp barrier
        double t0 = omp_get_wtime();
        for (int i=0; i<iterations; ++i) {
            if (parity) {
                /// publish round i, then spin for the reply
                left_payload = i;
                atomic_store_explicit( &left_ready, i, store_model);
                /// recv from right
                while (i != atomic_load_explicit( &right_ready, load_model));
                junk += right_payload;
            } else {
                /// recv from left
                while (i != atomic_load_explicit( &left_ready, load_model));
                junk += left_payload;
                /// send to right
                right_payload = i;
                atomic_store_explicit( &right_ready, i, store_model);
            }
        }
        /// STOP TIME
        #pragma omp barrier
        double t1 = omp_get_wtime();
        /// PRINT TIME: each thread reports its own wall time; one iteration
        /// is one full round trip
        double dt = t1-t0;
        #pragma omp critical
        {
            printf("total time elapsed = %lf\n", dt);
            printf("time per iteration = %e\n", dt/iterations);
            printf("%d\n", junk);
        }
    }
    return 0;
}
#else // C11
#error You need C11 atomics for this test!
#endif // C11
#endif // GCC <= 6
|
2.c | #include <stdlib.h>
#include <stdio.h>
#include <mpi.h>
#include <omp.h>
/* Hybrid MPI+OpenMP demo: every rank fills a[0..99] with 0..99, sums it
   sequentially, then re-sums it with an OpenMP worksharing loop plus a
   reduction, printing each thread's partial sum along the way. */
int main(int argc, char **argv)
{
  int size,rank;
  char processor_name[MPI_MAX_PROCESSOR_NAME];
  int namelen, i;
  int a[100];
  int sum;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Get_processor_name(processor_name,&namelen);
  printf("Process %d of %d on %s\n",rank,size,processor_name);
  /* sequential reference: 0+1+...+99 = 4950 */
  sum = 0;
  for( i=0; i<100; i++){
    a[i]=i;
    sum += a[i];
  }
  printf( "[%d]sequental sum=%d\n",rank, sum );
  sum = 0;
  // #pragma omp parallel for private(i) shared(a) reduction(+:sum)
  /* Each thread gets a private 'sum' (reduction variable); the value printed
     inside the region is that thread's partial sum only.  'nowait' lets each
     thread print as soon as its chunk finishes, before the region's implicit
     barrier.  The partials are combined into the shared 'sum' at the end of
     the region, which is what the final printf reports. */
  #pragma omp parallel private(i) shared(a) reduction(+:sum)
  {
    int orank;
    orank = omp_get_thread_num();
    #pragma omp for nowait
    for( i=0; i<100; i++)
    {
      sum += a[i];
    }
    printf( "[%d,%d] private OpenMp sum=%d\n",rank, orank, sum );
  }
  printf( "[%d] shared OpenMp sum=%d\n",rank, sum );
  MPI_Finalize();
  return (0);
}
|
GB_binop__bor_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int16)
// C=scalar+B GB (_bind1st__bor_int16)
// C=scalar+B' GB (_bind1st_tran__bor_int16)
// C=A+scalar GB (_bind2nd__bor_int16)
// C=A'+scalar GB (_bind2nd_tran__bor_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij) | (bij)
// Type and operator macros consumed by the included template files.
// Fix: the original GB_A_IS_PATTERN / GB_B_IS_PATTERN definitions ended in a
// stray line-continuation backslash after the "0", which splices the next
// source line into the macro definition.  The backslashes are removed so
// each macro body is exactly "0".

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) | (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_INT16 || GxB_NO_BOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0

// Disabled: this kernel is not generated for the BOR_INT16 operator.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__bor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  Returns
// GrB_NO_VALUE when this specialization is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  Returns GrB_NO_VALUE
// when this specialization is compiled out (GB_DISABLE).
// Fix: the original had two "return (GrB_SUCCESS)" statements in the enabled
// branch (one inside the block, one after it); the second was unreachable
// dead code.  A single return at the end of the branch remains.
GrB_Info GB (_Cdense_accumb__bor_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0

// Disabled: the colscale (C = A*D) kernel is not generated for BOR_INT16.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0

// Disabled: the rowscale (C = D*B) kernel is not generated for BOR_INT16.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  When is_eWiseUnion is true the
// alpha/beta scalars supply values for entries present in only one input.
GrB_Info GB (_AaddB__bor_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    // the scalars are only read by the template when is_eWiseUnion is true
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.
GrB_Info GB (_AemultB_08__bor_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy requests z=f(y,x) instead of z=f(x,y); for BOR
// (commutative, GB_BINOP_FLIP == 0) the flipped path is compiled out.
GrB_Info GB (_AemultB_02__bor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__bor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__bor_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for every entry of B, with the scalar x bound as the
// first operand.  Bb is B's bitmap (NULL means all bnz entries are present).
GrB_Info GB (_bind1st__bor_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // entries absent from the bitmap are skipped
        if (GBB (Bb, k))
        {
            int16_t b_k = GBX (Bx, k, false) ;
            Cx [k] = (x) | (b_k) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Compute Cx [k] = Ax [k] | y for every entry present in the bitmap Ab,
// with the scalar y bound as the second operand.
GrB_Info GB (_bind2nd__bor_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic input/output arrays, plus the bound scalar
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap Ab are left untouched
        if (GBB (Ab, k))
        {
            int16_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) | (aij) ; \
}

// Generated kernel: transpose A while applying z = x | aij, via the
// #included template.
GrB_Info GB (_bind1st_tran__bor_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish the file-level GB_ATYPE (same type here, since the A and
    // B types of this operator are identical) — generated boilerplate
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) | (y) ; \
}

// Generated kernel: transpose A while applying z = aij | y, via the
// #included template.
GrB_Info GB (_bind2nd_tran__bor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bml_trace_csr_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_trace.h"
#include "bml_trace_csr.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_types_csr.h"
#include "../bml_logger.h"
#include "bml_getters_csr.h"
#include <complex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Calculate the trace of a matrix.
 *
 * \ingroup trace_group
 *
 * \param A The matrix to calculate a trace for
 * \return the trace of A
 */
double TYPED_FUNC(
    bml_trace_csr) (
    const bml_matrix_csr_t * A)
{
    int N = A->N_;
    /* We currently assume sequential mode */
    /*
       int *A_index = (int *) A->index;
       int *A_nnz = (int *) A->nnz;
       int *A_localRowMin = (int *) A->domain->localRowMin;
       int *A_localRowMax = (int *) A->domain->localRowMax;
     */
    REAL_T trace = 0.0;
    /* Sum the diagonal entries A(i,i), each thread accumulating into the
     * OpenMP reduction variable "trace". */
    /* NOTE(review): assumes csr_get_row_element(A->data_[i], i) returns a
     * valid pointer even when the diagonal entry is not stored — a NULL
     * return would be dereferenced here; confirm against the getter. */
#pragma omp parallel for default(none) \
    shared(N, A) \
    reduction(+:trace)
    for (int i = 0; i < N; i++)
    {
        trace +=
            *((REAL_T *) TYPED_FUNC(csr_get_row_element) (A->data_[i], i));
    }
    /* For complex REAL_T only the real part of the trace is reported. */
    return (double) REAL_PART(trace);
}
/** Calculate the trace of a matrix multiplication.
 * Both matrices must have the same size.
 *
 * \ingroup trace_group
 *
 * \param A The matrix A
 * \param B The matrix B
 * \return the trace of A*B
 */
double TYPED_FUNC(
    bml_trace_mult_csr) (
    const bml_matrix_csr_t * A,
    const bml_matrix_csr_t * B)
{
    const int A_N = A->N_;
    REAL_T trace = 0.0;
    if (A_N != B->N_)
    {
        /* NOTE(review): assumes LOG_ERROR aborts; otherwise execution would
         * fall through to the loop below with mismatched sizes — confirm. */
        LOG_ERROR
            ("bml_trace_mult_csr: Matrices A and B have different sizes.");
    }
    /* trace(A*B) = sum_i sum_j A(i,j) * B(j,i): for every stored entry
     * A(i,j), search row j of B for column index i. */
#pragma omp parallel for \
    reduction(+:trace)
    for (int i = 0; i < A_N; i++)
    {
        int *acols = A->data_[i]->cols_;
        REAL_T *avals = (REAL_T *) A->data_[i]->vals_;
        const int annz = A->data_[i]->NNZ_;
        for (int pos = 0; pos < annz; pos++)
        {
            REAL_T a = avals[pos];
            const int j = acols[pos];
            const int bnnz = B->data_[j]->NNZ_;
            REAL_T *bvals = (REAL_T *) B->data_[j]->vals_;
            int *bcols = B->data_[j]->cols_;
            for (int bpos = 0; bpos < bnnz; bpos++)
            {
                const int k = bcols[bpos];
                if (i == k)
                {
                    /* found B(j,i); column indices are assumed unique
                     * within a row, so stop searching this row */
                    trace = trace + a * bvals[bpos];
                    break;
                }
            }
        }
    }
    /* complex case: the full (complex) sum is truncated to double by the
     * caller-visible return type */
    return trace;
}
|
DRACC_OMP_048_atomic_interference_yes.c | /*
Incrementaion with a threshold of N. The incrementation is atomic, but the query on the threshold depends on the counter.
The query is not atomic, therefore a Data Race is possible in line 15 to 17.
*/
#include <stdio.h>
#define N 100000
int countervar = 0;
int count(){
    /* Intentionally racy (this file is a data-race benchmark): the threshold
     * check (countervar < N) and the increment are not one atomic unit, so
     * several threads can pass the check before any of them increments, and
     * the final counter may exceed N. Do not "fix" this race. */
    #pragma omp target map(tofrom:countervar) device(0)
    #pragma omp teams distribute parallel for
    for (int i=0; i<N*2; i++){
        if(countervar<N){
            /* only the increment is atomic; the guard above is not */
            #pragma omp atomic
            countervar++;
        }
    }
    return 0;
}
int main(){
    // Run the (intentionally racy) counter kernel, then report the observed
    // final value next to the expected threshold.
    (void) count();
    printf("counter: %i expected: %i\n ", countervar, N);
    return 0;
}
GB_binop__isle_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint32)
// A*D function (colscale): GB (_AxD__isle_uint32)
// D*A function (rowscale): GB (_DxB__isle_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint32)
// C=scalar+B GB (_bind1st__isle_uint32)
// C=scalar+B' GB (_bind1st_tran__isle_uint32)
// C=A+scalar GB (_bind2nd__isle_uint32)
// C=A'+scalar GB (_bind2nd_tran__isle_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT32 || GxB_NO_ISLE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled by the generator: ISLE is not in that op list, so this variant
// is compiled out (hence the "(none)" name).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template, specialized
// by the GB_* macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_Cdense_accumB__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out (see GB_DISABLE)
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_Cdense_accumb__isle_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (unreachable: the block above already returned; generated boilerplate)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_AxD__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values, filled by the template
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_DxB__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values, filled by the template
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template; the alpha and
// beta scalars are only read when is_eWiseUnion is true.
GrB_Info GB (_AaddB__isle_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here and released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Generated kernel (method 08): the work is done by the #included template.
GrB_Info GB (_AemultB_08__isle_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Generated kernel (method 02): GB_BINOP_FLIP is 0 for ISLE in this file,
// so only the #else branch below is compiled.
GrB_Info GB (_AemultB_02__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Generated kernel (method 04): the work is done by the #included template.
GrB_Info GB (_AemultB_04__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_AemultB_bitmap__isle_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Compute Cx [k] = (x <= Bx [k]) for every entry present in the bitmap Bb,
// with the scalar x bound as the first operand.
GrB_Info GB (_bind1st__isle_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic input/output arrays, plus the bound scalar
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // entries absent from the bitmap Bb are left untouched
        if (GBB (Bb, k))
        {
            uint32_t bval = GBX (Bx, k, false) ;
            Cx [k] = (x <= bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Compute Cx [k] = (Ax [k] <= y) for every entry present in the bitmap Ab,
// with the scalar y bound as the second operand.
GrB_Info GB (_bind2nd__isle_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic input/output arrays, plus the bound scalar
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap Ab are left untouched
        if (GBB (Ab, k))
        {
            uint32_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x <= aij) ; \
}

// Generated kernel: transpose A while applying z = (x <= aij), via the
// #included template.
GrB_Info GB (_bind1st_tran__isle_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish the file-level GB_ATYPE (same type here, since the A and
    // B types of this operator are identical) — generated boilerplate
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij <= y) ; \
}

// Generated kernel: transpose A while applying z = (aij <= y), via the
// #included template.
GrB_Info GB (_bind2nd_tran__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__eq_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32)
// A*D function (colscale): GB (_AxD__eq_uint32)
// D*A function (rowscale): GB (_DxB__eq_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32)
// C=scalar+B GB (_bind1st__eq_uint32)
// C=scalar+B' GB (_bind1st_tran__eq_uint32)
// C=A+scalar GB (_bind2nd__eq_uint32)
// C=A'+scalar GB (_bind2nd_tran__eq_uint32)
// C type: bool
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled by the generator: EQ is not in that op list, so this variant
// is compiled out (hence the "(none)" name).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template, specialized
// by the GB_* macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Generated kernel: the subassign template is disabled (#if 0) for this
// operator, so the function is a no-op that reports success.
GrB_Info GB (_Cdense_accumB__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Generated kernel: the subassign template is disabled (#if 0) for this
// operator, so the function is a no-op that reports success.
GrB_Info GB (_Cdense_accumb__eq_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_AxD__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values (bool for the EQ result), filled by the template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_DxB__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values (bool for the EQ result), filled by the template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template; the alpha and
// beta scalars are only read when is_eWiseUnion is true.
GrB_Info GB (_AaddB__eq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here and released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Generated kernel (method 08): the work is done by the #included template.
GrB_Info GB (_AemultB_08__eq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Generated kernel (method 02): GB_BINOP_FLIP is 0 for EQ in this file,
// so only the #else branch below is compiled.
GrB_Info GB (_AemultB_02__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Generated kernel (method 04): the work is done by the #included template.
GrB_Info GB (_AemultB_04__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Generated kernel: the work is done by the #included template.
GrB_Info GB (_AemultB_bitmap__eq_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Compute Cx [k] = (x == Bx [k]) for every entry present in the bitmap Bb,
// with the scalar x bound as the first operand; C's values are bool.
GrB_Info GB (_bind1st__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views: C is bool, the scalar and B are uint32_t
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // entries absent from the bitmap Bb are left untouched
        if (GBB (Bb, k))
        {
            uint32_t bval = GBX (Bx, k, false) ;
            Cx [k] = (x == bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Compute Cx [k] = (Ax [k] == y) for every entry present in the bitmap Ab,
// with the scalar y bound as the second operand; C's values are bool.
GrB_Info GB (_bind2nd__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views: C is bool, the scalar and A are uint32_t
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap Ab are left untouched
        if (GBB (Ab, k))
        {
            uint32_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x == aij) ; \
}

// Generated kernel: transpose A while applying z = (x == aij), via the
// #included template.
GrB_Info GB (_bind1st_tran__eq_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish the file-level GB_ATYPE (same type here, since the A and
    // B types of this operator are identical) — generated boilerplate
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij == y) ; \
}

// Generated kernel: transpose A while applying z = (aij == y), via the
// #included template.
GrB_Info GB (_bind2nd_tran__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
paralelo.c | /*
- Jose Block
- Francisco Rosal
- Gian Luca Rivera
Compilar: gcc -fopenmp -o paralelo paralelo.c
Ejecutar: ./paralelo
gcc -fopenmp -o paralelo paralelo.c && ./paralelo
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
int main(int argc, char* argv[]) {
    double t_init = omp_get_wtime();

    // One dimensional heat dissipation equation (explicit finite differences)
    // dT/dt = c*dT/dx
    // T(x,0) = t0
    // T(0,t) = tl
    // T(L,t) = tr
    int N, time_iterations;
    double L, delta_x, t0, tl, tr;
    double c = 1e-5; //10e-5 m^2/s

    time_iterations = 1000;
    L = 1;
    N = 5000;
    t0 = 20;
    tl = 90;
    tr = 100;
    delta_x = L / N;
    (void) delta_x; // only needed if delta_t is derived from the stability condition
    (void) c;

    // delta_t = ((delta_x * delta_x)*0.5)/c;
    // double C = (c * delta_t) / (delta_x * delta_x);
    double C = 0.5; // Courant coefficient, fixed at the explicit-scheme stability limit

    // Temperature at the current and the next time step.
    // Both arrays must be SHARED between threads: each thread reads its
    // neighbours' values, so private copies (the original
    // firstprivate(current_T)) would leave stale data in every thread
    // except the one that executed the "single" update.
    double current_T[N];
    double next_T[N];
    for (int j = 0; j < N; j++) {
        current_T[j] = t0;
        next_T[j] = 0.0;
    }

    // Set the boundary conditions
    current_T[0] = tl;
    current_T[N-1] = tr;

    // j indexes space, T_i indexes time.
    // Fixes relative to the original version:
    //  - current_T and next_T are shared (default) instead of firstprivate
    //  - the time-step counter is a per-thread loop variable kept in
    //    lockstep by the implicit barriers, instead of a shared T_i
    //    incremented by all 8 threads (a data race that also skipped steps)
    //  - per-step printing is performed by exactly one thread
    #pragma omp parallel num_threads(8)
    {
        for (int T_i = 0; T_i < time_iterations; T_i++) {
            #pragma omp single
            printf("\n\nTime step: %d\n", T_i);
            // implicit barrier at the end of "single"

            #pragma omp for schedule(static , 1000)
            for (int j = 0; j < N; j++) {
                if (j == 0 || j == N-1) {
                    next_T[j] = current_T[j]; // fixed boundary temperatures
                } else {
                    next_T[j] = current_T[j] + C * (current_T[j-1] - 2 * current_T[j] + current_T[j+1]);
                }
            }
            // implicit barrier at the end of "for": next_T is fully written

            #pragma omp single
            {
                printf("\n");
                for (int j = 0; j < N; j++) {
                    printf("%f ", next_T[j]);
                }
                for (int j = 0; j < N; j++) {
                    current_T[j] = next_T[j];
                    next_T[j] = 0.0;
                }
                printf("\n");
            }
            // implicit barrier: every thread sees the updated current_T
        }
    }

    double t_fin = omp_get_wtime();
    double delta = t_fin - t_init;
    printf("Tiempo: %lf ", delta);
    return 0;
}
fft-cuda.c | /* Copyright 2013, 2015. The Regents of the University of California.
* Copyright 2019. Uecker Lab, University Medical Center Göttingen.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2019 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* Christian Holme <christian.holme@med.uni-goettingen.de>
*
*
* Internal interface to the CUFFT library used in fft.c.
*/
#include <stdbool.h>
#include <complex.h>
#include <assert.h>
#include <limits.h>
#include "misc/misc.h"
#include "num/multind.h"
#include "fft-cuda.h"
#ifdef USE_CUDA
#include <cufft.h>
#include "num/gpuops.h"
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
// One CUFFT plan plus an optional chained follow-up plan: when the requested
// transform cannot be expressed as a single cufftPlanMany call,
// fft_cuda_plan() splits it and links the remainder via 'chain'.
struct fft_cuda_plan_s {
cufftHandle cufft; // the underlying CUFFT plan handle
struct fft_cuda_plan_s* chain; // remaining transform, run in-place on dst (or NULL)
bool backwards; // true: CUFFT_INVERSE, false: CUFFT_FORWARD
long batch; // outer batches looped over in fft_cuda_exec
long idist; // input distance between batches (complex-float elements)
long odist; // output distance between batches (complex-float elements)
};
// One dimension of a strided array: length plus input/output strides,
// in units of complex-float elements (byte strides divided by CFL_SIZE).
struct iovec {
long n; // dimension length
long is; // input stride
long os; // output stride
};
// detect if flags has blocks of 1's separated by 0's
// Return true when the low D bits of 'flags' contain more than one
// contiguous run of 1's (i.e. a 1-run, then at least one 0, then
// another 1); a single contiguous run (or no bits at all) yields false.
static bool noncontiguous_flags(int D, unsigned long flags)
{
bool seen_ones = false; // a run of 1's has started
bool seen_gap = false; // a 0 was seen after that run
for (int i = 0; i < D; i++) {
if (MD_IS_SET(flags, i)) {
if (seen_gap)
return true; // a second run of 1's begins here
seen_ones = true;
} else if (seen_ones) {
seen_gap = true; // the first run of 1's has ended
}
}
return false;
}
// Build a single cufftPlanMany-based plan for the dimensions selected by
// 'flags'.  The transformed dimensions must form one contiguous run of
// flag bits and there may be at most 3 of them; the untransformed
// dimensions are folded into CUFFT's batch parameter where their strides
// allow, with any remainder looped over in fft_cuda_exec via
// plan->batch/idist/odist.  Returns NULL when the layout cannot be
// expressed this way (the caller then splits the flags).
static struct fft_cuda_plan_s* fft_cuda_plan0(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards)
{
// TODO: This is not optimal, as it will often create separate fft's where they
// are not needed. And since we compute blocks, we could also recurse
// into both blocks...
if (noncontiguous_flags(D, flags))
return NULL;
PTR_ALLOC(struct fft_cuda_plan_s, plan);
unsigned int N = D;
plan->batch = 1;
plan->odist = 0;
plan->idist = 0;
plan->backwards = backwards;
plan->chain = NULL;
// dims: the transformed dimensions; hmdims: the untransformed (batch) ones
struct iovec dims[N];
struct iovec hmdims[N];
assert(0 != flags);
// the cufft interface is strange, but we do our best...
unsigned int k = 0;
unsigned int l = 0;
// split the dimensions into transformed (dims) and batch (hmdims) sets,
// skipping singletons; strides are converted from bytes to complex elements
for (unsigned int i = 0; i < N; i++) {
if (1 == dimensions[i])
continue;
if (MD_IS_SET(flags, i)) {
dims[k].n = dimensions[i];
dims[k].is = istrides[i] / CFL_SIZE;
dims[k].os = ostrides[i] / CFL_SIZE;
k++;
} else {
hmdims[l].n = dimensions[i];
hmdims[l].is = istrides[i] / CFL_SIZE;
hmdims[l].os = ostrides[i] / CFL_SIZE;
l++;
}
}
assert(k > 0);
int cudims[k];
int cuiemb[k];
int cuoemb[k];
long batchdims[l];
long batchistr[l];
long batchostr[l];
int lis = dims[0].is;
int los = dims[0].os;
// cufftPlanMany supports at most 3 transform dimensions
if (k > 3)
goto errout;
// the transform dimensions are handed to CUFFT in reversed order;
// lis/los accumulate the total extent of the transformed block
for (unsigned int i = 0; i < k; i++) {
// assert(dims[i].is == lis);
// assert(dims[i].os == los);
cudims[k - 1 - i] = dims[i].n;
cuiemb[k - 1 - i] = dims[i].n;
cuoemb[k - 1 - i] = dims[i].n;
lis = dims[i].n * dims[i].is;
los = dims[i].n * dims[i].os;
}
for (unsigned int i = 0; i < l; i++) {
batchdims[i] = hmdims[i].n;
batchistr[i] = hmdims[i].is;
batchostr[i] = hmdims[i].os;
}
int istride = dims[0].is;
int ostride = dims[0].os;
int idist = lis;
int odist = los;
int cubs = 1;
// check that batch dimensions can be collapsed to one
// (bi/bo = number of leading batch dims that are contiguous on input/output)
unsigned int bi = md_calc_blockdim(l, batchdims, batchistr, hmdims[0].is);
unsigned int bo = md_calc_blockdim(l, batchdims, batchostr, hmdims[0].os);
if (bi != bo)
goto errout;
if (bi > 0) {
// leading contiguous batch dims go into CUFFT's own batch loop
idist = hmdims[0].is;
odist = hmdims[0].os;
cubs = md_calc_size(bi, batchdims);
}
if (l != bi) {
// check that batch dimensions can be collapsed to one
if (l - bi != md_calc_blockdim(l - bi, batchdims + bi, batchistr + bi, hmdims[bi].is))
goto errout;
if (l - bo != md_calc_blockdim(l - bo, batchdims + bo, batchostr + bo, hmdims[bo].os))
goto errout;
// the remaining batch dims are looped over in fft_cuda_exec
plan->idist = hmdims[bi].is;
plan->odist = hmdims[bo].os;
plan->batch = md_calc_size(l - bi, batchdims + bi);
}
assert(k <= 3);
int err;
// NOTE(review): the critical section presumably serializes plan creation
// because concurrent cufftPlanMany calls are unsafe here - confirm
#pragma omp critical
err = cufftPlanMany(&plan->cufft, k,
cudims, cuiemb, istride, idist,
cuoemb, ostride, odist, CUFFT_C2C, cubs);
if (CUFFT_SUCCESS != err)
goto errout;
return PTR_PASS(plan);
errout:
PTR_FREE(plan);
return NULL;
}
// Return the most significant set bit of 'flags' (as a power of two),
// or 0 when no bit is set.
static unsigned long find_msb(unsigned long flags)
{
// smear the highest set bit into every lower position
for (unsigned int i = 1; i < CHAR_BIT * sizeof(flags); i *= 2)
flags |= flags >> i;
// isolate the top bit of the smeared mask; unlike (flags + 1) / 2 this
// cannot overflow (the old form returned 0 when all bits were set,
// because flags + 1 wrapped around to 0)
return flags ^ (flags >> 1);
}
// Create a (possibly chained) CUFFT plan covering all dimensions selected
// by 'flags'.  Returns NULL on failure.
struct fft_cuda_plan_s* fft_cuda_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards)
{
// first, try to cover the whole transform with a single plan
struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, flags, ostrides, istrides, backwards);
if (NULL != plan)
return plan;
// otherwise split off the most significant flagged dimension and
// recurse on the rest
unsigned long msb = find_msb(flags);
if (flags & msb) {
struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, msb, ostrides, istrides, backwards);
if (NULL == plan)
return NULL;
// the chained plan runs in-place on the output (fft_cuda_exec calls it
// with dst as both source and destination), so its input strides are the
// output strides: passing ostrides twice here is intentional, not a typo
plan->chain = fft_cuda_plan(D, dimensions, flags & ~msb, ostrides, ostrides, backwards);
if (NULL == plan->chain) {
fft_cuda_free_plan(plan);
return NULL;
}
return plan;
}
return NULL;
}
// Release a plan together with every plan chained to it.
// Iterative rather than recursive, so arbitrarily long chains cannot grow
// the call stack; a NULL argument is tolerated as a no-op (the original
// dereferenced it unconditionally).
void fft_cuda_free_plan(struct fft_cuda_plan_s* cuplan)
{
while (NULL != cuplan) {
// save the link before freeing the node it lives in
struct fft_cuda_plan_s* next = cuplan->chain;
cufftDestroy(cuplan->cufft);
xfree(cuplan);
cuplan = next;
}
}
// Execute the (possibly chained) plan: dst = FFT(src).
// Both pointers must be device pointers; any chained remainder is applied
// in-place on dst.
void fft_cuda_exec(struct fft_cuda_plan_s* cuplan, complex float* dst, const complex float* src)
{
assert(cuda_ondevice(src));
assert(cuda_ondevice(dst));
assert(NULL != cuplan);
int err;
// loop over the batch dimensions that could not be folded into the plan
for (int i = 0; i < cuplan->batch; i++) {
// the cast drops 'const' from src only because cufftExecC2C takes
// non-const input pointers; idist/odist advance in complex elements
if (CUFFT_SUCCESS != (err = cufftExecC2C(cuplan->cufft,
(cufftComplex*)src + i * cuplan->idist,
(cufftComplex*)dst + i * cuplan->odist,
(!cuplan->backwards) ? CUFFT_FORWARD : CUFFT_INVERSE)))
error("CUFFT: %d\n", err);
}
// transform the remaining dimensions in-place on the output
if (NULL != cuplan->chain)
fft_cuda_exec(cuplan->chain, dst, dst);
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.