source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unaryop__lnot_bool_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_int16
// op(A') function: GB_tran__lnot_bool_int16
// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__lnot_bool_int16: Cx [p] = (bool) !Ax [p], for p = 0..anz-1.
// Dense apply of the logical-NOT unary op; the cast and op are expanded by
// the GB_CAST_OP macro defined above in this file.
GrB_Info GB_unop__lnot_bool_int16
(
bool *restrict Cx,              // output array, size anz
const int16_t *restrict Ax,     // input array, size anz
int64_t anz,                    // number of entries to process
int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this kernel was compiled out via the GxB_NO_* controls (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;             // Cx [p] = (bool) !Ax [p]
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__lnot_bool_int16: C = op (cast (A')) — transpose A, typecast each
// entry from int16_t to bool, and apply logical NOT.  The actual loops live
// in the shared template GB_unaryop_transpose.c (phase 2 of 2), which
// consumes the GB_* macros defined above.
GrB_Info GB_tran__lnot_bool_int16
(
GrB_Matrix C,                          // output matrix
const GrB_Matrix A,                    // input matrix, logically transposed
int64_t *restrict *Rowcounts,          // workspace used by the template
GBI_single_iterator Iter,              // iterator over A's vectors
const int64_t *restrict A_slice,       // slice boundaries for parallelism
int naslice                            // number of slices
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
distance.h | #pragma once
#include <utils.h>
#ifdef _WINDOWS
#include <immintrin.h>
#include <smmintrin.h>
#include <tmmintrin.h>
#include <intrin.h>
#else
#include <immintrin.h>
#endif
#include <cosine_similarity.h>
#include <iostream>
// Anonymous namespace: SIMD helper routines used by the distance functors
// below.  Naming note: despite the _mm_* prefixes these are local helpers,
// not Intel intrinsics (that prefix is conventionally reserved for the
// compiler's intrinsics; kept here for compatibility with existing code).
namespace {
// Sign-extend the HIGH 8 bytes of X to 16 bits and return the four pairwise
// sums of squares (via pmaddwd), converted to float.
static inline __m128 _mm_mulhi_epi8(__m128i X) {
__m128i zero = _mm_setzero_si128();
__m128i sign_x = _mm_cmplt_epi8(X, zero);   // 0xFF where byte of X < 0
__m128i xhi = _mm_unpackhi_epi8(X, sign_x); // sign-extended high 8 bytes
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_setzero_si128(), _mm_madd_epi16(xhi, xhi)));  // the +0 add is redundant
}
// Same as _mm_mulhi_epi8 but first shifts each 64-bit lane right by 32 bits,
// so the "high" unpack picks up a different 8 bytes of the original vector.
static inline __m128 _mm_mulhi_epi8_shift32(__m128i X) {
__m128i zero = _mm_setzero_si128();
X = _mm_srli_epi64(X, 32);
__m128i sign_x = _mm_cmplt_epi8(X, zero);
__m128i xhi = _mm_unpackhi_epi8(X, sign_x);
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_setzero_si128(), _mm_madd_epi16(xhi, xhi)));
}
// 16-way int8 dot product: sign-extend both vectors to 16 bits, multiply and
// pairwise-add (pmaddwd), then sum low/high halves into 4 int32 lanes,
// returned as floats.
static inline __m128 _mm_mul_epi8(__m128i X, __m128i Y) {
__m128i zero = _mm_setzero_si128();
__m128i sign_x = _mm_cmplt_epi8(X, zero);
__m128i sign_y = _mm_cmplt_epi8(Y, zero);
__m128i xlo = _mm_unpacklo_epi8(X, sign_x);
__m128i xhi = _mm_unpackhi_epi8(X, sign_x);
__m128i ylo = _mm_unpacklo_epi8(Y, sign_y);
__m128i yhi = _mm_unpackhi_epi8(Y, sign_y);
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_madd_epi16(xlo, ylo), _mm_madd_epi16(xhi, yhi)));
}
// Single-operand overload: partial sums of squares of all 16 int8 lanes of X.
static inline __m128 _mm_mul_epi8(__m128i X) {
__m128i zero = _mm_setzero_si128();
__m128i sign_x = _mm_cmplt_epi8(X, zero);
__m128i xlo = _mm_unpacklo_epi8(X, sign_x);
__m128i xhi = _mm_unpackhi_epi8(X, sign_x);
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_madd_epi16(xlo, xlo), _mm_madd_epi16(xhi, xhi)));
}
// Dot product of the low bytes of X and Y, sign-extended to 16 bits; the
// unpack with zero keeps only the low madd results in the output lanes.
static inline __m128 _mm_mul32_pi8(__m128i X, __m128i Y) {
__m128i xlo = _mm_cvtepi8_epi16(X), ylo = _mm_cvtepi8_epi16(Y);
return _mm_cvtepi32_ps(
_mm_unpacklo_epi32(_mm_madd_epi16(xlo, ylo), _mm_setzero_si128()));
}
// 32-way int8 dot product (AVX2): sign-extend to 16 bits, pmaddwd, and sum
// low/high interleaves into 8 int32 lanes (lane order does not matter to
// callers, which horizontally reduce the result).
static inline __m256 _mm256_mul_epi8(__m256i X, __m256i Y) {
__m256i zero = _mm256_setzero_si256();
__m256i sign_x = _mm256_cmpgt_epi8(zero, X);
__m256i sign_y = _mm256_cmpgt_epi8(zero, Y);
__m256i xlo = _mm256_unpacklo_epi8(X, sign_x);
__m256i xhi = _mm256_unpackhi_epi8(X, sign_x);
__m256i ylo = _mm256_unpacklo_epi8(Y, sign_y);
__m256i yhi = _mm256_unpackhi_epi8(Y, sign_y);
return _mm256_cvtepi32_ps(_mm256_add_epi32(_mm256_madd_epi16(xlo, ylo),
_mm256_madd_epi16(xhi, yhi)));
}
// Dot product of the leading bytes of X and Y; the blend mask 252
// (0b11111100) zeroes lanes 2..7, keeping only the first two madd sums.
static inline __m256 _mm256_mul32_pi8(__m128i X, __m128i Y) {
__m256i xlo = _mm256_cvtepi8_epi16(X), ylo = _mm256_cvtepi8_epi16(Y);
return _mm256_blend_ps(_mm256_cvtepi32_ps(_mm256_madd_epi16(xlo, ylo)),
_mm256_setzero_ps(), 252);
}
// Horizontal sum of the 8 float lanes of x.
static inline float _mm256_reduce_add_ps(__m256 x) {
/* ( x3+x7, x2+x6, x1+x5, x0+x4 ) */
const __m128 x128 =
_mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x));
/* ( -, -, x1+x3+x5+x7, x0+x2+x4+x6 ) */
const __m128 x64 = _mm_add_ps(x128, _mm_movehl_ps(x128, x128));
/* ( -, -, -, x0+x1+x2+x3+x4+x5+x6+x7 ) */
const __m128 x32 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
/* Conversion to float is a no-op on x86-64 */
return _mm_cvtss_f32(x32);
}
} // namespace
namespace diskann {
// enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };
// Abstract distance interface: compare() returns a dissimilarity score
// between two vectors of `length` elements (smaller means closer).
template<typename T>
class Distance {
public:
// Concrete metrics (L2, cosine, inner product, ...) implement this.
virtual float compare(const T *a, const T *b, unsigned length) const = 0;
virtual ~Distance() {
}
};
// Cosine-based distance functor; defers to the project-wide helper.
// NOTE(review): presumably the helper returns a distance (e.g. 1 - cosine
// similarity) rather than raw similarity -- confirm against
// compute_cosine_similarity in cosine_similarity.h.
template<typename T>
class DistanceCosine : public Distance<T> {
float compare(const T *a, const T *b, unsigned length) const {
return diskann::compute_cosine_similarity<T>(a, b, length);
}
};
// Squared L2 distance between two int8 vectors.
class DistanceL2Int8 : public Distance<int8_t> {
public:
float compare(const int8_t *a, const int8_t *b, unsigned size) const {
int32_t result = 0;
#ifdef _WINDOWS
#ifdef USE_AVX2
// MSVC + AVX2 path: 32 bytes per iteration, then a 4-byte tail loop.
__m256 r = _mm256_setzero_ps();
char * pX = (char *) a, *pY = (char *) b;
while (size >= 32) {
__m256i r1 = _mm256_subs_epi8(_mm256_loadu_si256((__m256i *) pX),
_mm256_loadu_si256((__m256i *) pY));
r = _mm256_add_ps(r, _mm256_mul_epi8(r1, r1));
pX += 32;
pY += 32;
size -= 32;
}
// NOTE(review): this tail loads 16 bytes while advancing only 4, so it
// reads past the logical end of the buffers (assumes padded allocations);
// and since `size` is unsigned, a size not divisible by 4 would wrap
// around here -- confirm callers always pass multiples of 4.
while (size > 0) {
__m128i r2 = _mm_subs_epi8(_mm_loadu_si128((__m128i *) pX),
_mm_loadu_si128((__m128i *) pY));
r = _mm256_add_ps(r, _mm256_mul32_pi8(r2, r2));
pX += 4;
pY += 4;
size -= 4;
}
// horizontal reduction; .m256_f32 element access is MSVC-specific
r = _mm256_hadd_ps(_mm256_hadd_ps(r, r), r);
return r.m256_f32[0] + r.m256_f32[4];
#else
// MSVC without AVX2: scalar loop (widen to 16-bit before subtracting to
// avoid int8 overflow, then square in 32-bit).
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) size; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
#endif
#else
// non-Windows: compiler-vectorized scalar loop; the aligned(a, b : 8)
// clause asserts 8-byte alignment of the inputs.
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) size; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
#endif
}
};
// Squared L2 distance between two uint8 vectors (scalar loop; vectorized by
// the compiler via omp simd on non-Windows builds).
class DistanceL2UInt8 : public Distance<uint8_t> {
public:
float compare(const uint8_t *a, const uint8_t *b, unsigned size) const {
uint32_t result = 0;
#ifndef _WINDOWS
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
#endif
// widen to int16 before subtracting so the uint8 difference cannot wrap
for (_s32 i = 0; i < (_s32) size; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
}
};
// Squared L2 distance between two float vectors.
// NOTE(review): the GCC build promises the compiler 32-byte aligned inputs
// (__builtin_assume_aligned) and the AVX2 path uses aligned loads
// (_mm256_load_ps) -- callers MUST allocate a and b 32-byte aligned.
class DistanceL2 : public Distance<float> {
public:
#ifndef _WINDOWS
float compare(const float *a, const float *b, unsigned size) const
__attribute__((hot)) {
a = (const float *) __builtin_assume_aligned(a, 32);
b = (const float *) __builtin_assume_aligned(b, 32);
#else
float compare(const float *a, const float *b, unsigned size) const {
#endif
float result = 0;
#ifdef USE_AVX2
// assume size is divisible by 8
_u16 niters = size / 8;
__m256 sum = _mm256_setzero_ps();
for (_u16 j = 0; j < niters; j++) {
// scope is a[8j:8j+7], b[8j:8j+7]
// load a_vec
if (j < (niters - 1)) {
_mm_prefetch((char *) (a + 8 * (j + 1)), _MM_HINT_T0);
_mm_prefetch((char *) (b + 8 * (j + 1)), _MM_HINT_T0);
}
__m256 a_vec = _mm256_load_ps(a + 8 * j);
// load b_vec
__m256 b_vec = _mm256_load_ps(b + 8 * j);
// a_vec - b_vec
__m256 tmp_vec = _mm256_sub_ps(a_vec, b_vec);
/*
// (a_vec - b_vec)**2
__m256 tmp_vec2 = _mm256_mul_ps(tmp_vec, tmp_vec);
// accumulate sum
sum = _mm256_add_ps(sum, tmp_vec2);
*/
// sum = (tmp_vec**2) + sum  (fused multiply-add)
sum = _mm256_fmadd_ps(tmp_vec, tmp_vec, sum);
}
// horizontal add sum
result = _mm256_reduce_add_ps(sum);
#else
#ifndef _WINDOWS
#pragma omp simd reduction(+ : result) aligned(a, b : 32)
#endif
for (_s32 i = 0; i < (_s32) size; i++) {
result += (a[i] - b[i]) * (a[i] - b[i]);
}
#endif
return result;
}
};
// Slow implementations of the distance functions for machines without AVX2
// Scalar (no-SIMD) squared-L2 distance for integer element types; fallback
// for machines without AVX2.
template<typename T>
class SlowDistanceL2Int : public Distance<T> {
// widen to int16 before subtracting to avoid 8-bit overflow, square in 32-bit
virtual float compare(const T *a, const T *b, unsigned length) const {
uint32_t result = 0;
for (_u32 i = 0; i < length; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
}
};
// Scalar (no-SIMD) squared-L2 distance between float vectors; fallback for
// machines without AVX2.
class SlowDistanceL2Float : public Distance<float> {
// Returns sum over i of (a[i] - b[i])^2.
virtual float compare(const float *a, const float *b,
unsigned length) const {
float acc = 0.0f;
const float *stop = a + length;
while (a != stop) {
const float diff = *a++ - *b++;
acc += diff * diff;
}
return acc;
}
};
// Squared L2 distance for int8 vectors ("AVX" variant; the Windows branch
// actually uses 128-bit SSE intrinsics).
class AVXDistanceL2Int8 : public Distance<int8_t> {
public:
virtual float compare(const int8_t *a, const int8_t *b,
unsigned int length) const {
#ifndef _WINDOWS
// non-Windows: scalar loop, compiler-vectorized via omp simd
int32_t result = 0;
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) length; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
}
#else
// Windows: 16 bytes per iteration with aligned loads.
// NOTE(review): _mm_load_si128 requires 16-byte aligned pointers, and the
// 8-/4-byte tails re-load overlapping 16-byte blocks (a - 8, a - 12) and
// extract the relevant halves via the _mm_mulhi_epi8* helpers -- this
// assumes aligned, padded buffers; confirm against the caller's allocator.
__m128 r = _mm_setzero_ps();
__m128i r1;
while (length >= 16) {
r1 = _mm_subs_epi8(_mm_load_si128((__m128i *) a),
_mm_load_si128((__m128i *) b));
r = _mm_add_ps(r, _mm_mul_epi8(r1));
a += 16;
b += 16;
length -= 16;
}
// horizontal reduction; .m128_f32 element access is MSVC-specific
r = _mm_hadd_ps(_mm_hadd_ps(r, r), r);
float res = r.m128_f32[0];
if (length >= 8) {
__m128 r2 = _mm_setzero_ps();
__m128i r3 = _mm_subs_epi8(_mm_load_si128((__m128i *) (a - 8)),
_mm_load_si128((__m128i *) (b - 8)));
r2 = _mm_add_ps(r2, _mm_mulhi_epi8(r3));
a += 8;
b += 8;
length -= 8;
r2 = _mm_hadd_ps(_mm_hadd_ps(r2, r2), r2);
res += r2.m128_f32[0];
}
if (length >= 4) {
__m128 r2 = _mm_setzero_ps();
__m128i r3 = _mm_subs_epi8(_mm_load_si128((__m128i *) (a - 12)),
_mm_load_si128((__m128i *) (b - 12)));
r2 = _mm_add_ps(r2, _mm_mulhi_epi8_shift32(r3));
res += r2.m128_f32[0] + r2.m128_f32[1];
}
return res;
}
#endif
};
// Squared L2 distance for float vectors ("AVX" variant; the Windows branch
// uses 128-bit SSE intrinsics, 4 floats per iteration).
class AVXDistanceL2Float : public Distance<float> {
public:
virtual float compare(const float *a, const float *b,
unsigned int length) const {
#ifndef _WINDOWS
float result = 0;
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) length; i++) {
result += (a[i] - b[i]) * (a[i] - b[i]);
}
return result;
}
#else
// NOTE(review): any trailing length % 4 elements are silently ignored on
// this path -- confirm callers always pass lengths divisible by 4.
__m128 diff, v1, v2;
__m128 sum = _mm_set1_ps(0);
while (length >= 4) {
v1 = _mm_loadu_ps(a);
a += 4;
v2 = _mm_loadu_ps(b);
b += 4;
diff = _mm_sub_ps(v1, v2);
sum = _mm_add_ps(sum, _mm_mul_ps(diff, diff));
length -= 4;
}
// .m128_f32 element access is MSVC-specific
return sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2] +
sum.m128_f32[3];
}
#endif
};
// Inner-product based "distance".  compare() negates the dot product so a
// larger inner product yields a smaller (better) score under the usual
// minimization convention.
template<typename T>
class DistanceInnerProduct : public Distance<T> {
 public:
  // Dot product of a and b interpreted as float vectors of `size` elements.
  // NOTE(review): the SIMD paths round `size` up to the vector width and
  // read through the rounded-up bound, so the buffers are assumed to be
  // padded -- confirm against the allocators used by callers.
  float inner_product(const T *a, const T *b, unsigned size) const {
    float result = 0;
#ifdef __GNUC__
#ifdef __AVX__
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
  tmp1 = _mm256_loadu_ps(addr1);                \
  tmp2 = _mm256_loadu_ps(addr2);                \
  tmp1 = _mm256_mul_ps(tmp1, tmp2);             \
  dest = _mm256_add_ps(dest, tmp1);
    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    unsigned D = (size + 7) & ~7U;  // size rounded up to a multiple of 8
    unsigned DR = D % 16;           // remainder (0 or 8), handled up front
    unsigned DD = D - DR;           // bulk part, a multiple of 16 floats
    const float *l = (float *) a;
    const float *r = (float *) b;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__((aligned(32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_loadu_ps(unpack);
    if (DR) {
      AVX_DOT(e_l, e_r, sum, l0, r0);
    }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
      AVX_DOT(l, r, sum, l0, r0);
      AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] +
             unpack[5] + unpack[6] + unpack[7];
#else
#ifdef __SSE2__
// BUGFIX: the original macro used the nonexistent _mm128_* intrinsic names
// (_mm128_loadu_ps, _mm128_mul_ps, _mm128_add_ps).  The 128-bit SSE
// intrinsics are spelled _mm_*, so this branch previously failed to compile.
#define SSE_DOT(addr1, addr2, dest, tmp1, tmp2) \
  tmp1 = _mm_loadu_ps(addr1);                   \
  tmp2 = _mm_loadu_ps(addr2);                   \
  tmp1 = _mm_mul_ps(tmp1, tmp2);                \
  dest = _mm_add_ps(dest, tmp1);
    __m128 sum;
    __m128 l0, l1, l2, l3;
    __m128 r0, r1, r2, r3;
    unsigned D = (size + 3) & ~3U;  // size rounded up to a multiple of 4
    unsigned DR = D % 16;           // remainder (0, 4, 8, or 12)
    unsigned DD = D - DR;           // bulk part, a multiple of 16 floats
    const float *l = (float *) a;
    const float *r = (float *) b;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[4] __attribute__((aligned(16))) = {0, 0, 0, 0};
    sum = _mm_load_ps(unpack);
    switch (DR) {
      case 12:
        SSE_DOT(e_l + 8, e_r + 8, sum, l2, r2);
        /* fall through */
      case 8:
        SSE_DOT(e_l + 4, e_r + 4, sum, l1, r1);
        /* fall through */
      case 4:
        SSE_DOT(e_l, e_r, sum, l0, r0);
        /* fall through */
      default:
        break;
    }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
      SSE_DOT(l, r, sum, l0, r0);
      SSE_DOT(l + 4, r + 4, sum, l1, r1);
      SSE_DOT(l + 8, r + 8, sum, l2, r2);
      SSE_DOT(l + 12, r + 12, sum, l3, r3);
    }
    _mm_storeu_ps(unpack, sum);
    result += unpack[0] + unpack[1] + unpack[2] + unpack[3];
#else
    // Portable scalar fallback, unrolled by 4.
    float dot0, dot1, dot2, dot3;
    const float *last = a + size;
    const float *unroll_group = last - 3;
    /* Process 4 items with each loop for efficiency. */
    while (a < unroll_group) {
      dot0 = a[0] * b[0];
      dot1 = a[1] * b[1];
      dot2 = a[2] * b[2];
      dot3 = a[3] * b[3];
      result += dot0 + dot1 + dot2 + dot3;
      a += 4;
      b += 4;
    }
    /* Process last 0-3 elements. Not needed for standard vector lengths. */
    while (a < last) {
      result += *a++ * *b++;
    }
#endif
#endif
#endif
    return result;
  }
  // Returns the NEGATED inner product: minimizing this "distance" maximizes
  // similarity.
  float compare(const T *a, const T *b, unsigned size) const {
    float result = inner_product(a, b, size);
    return -result;
  }
};
// "Fast" L2 distance built on the inner product, using
//   ||a - b||^2 = ||a||^2 + ||b||^2 - 2<a,b>.
// norm() precomputes a squared norm; compare(a, b, norm, size) returns
// norm - 2<a,b>, i.e. the squared distance up to the query's own constant
// norm, which does not affect ranking.  Currently meaningful only for
// float (templated for future use).
template<typename T>
class DistanceFastL2 : public DistanceInnerProduct<T> {
 public:
  // Squared Euclidean norm of a (sum of squares of `size` floats).
  // NOTE(review): like inner_product, the SIMD paths read up to the size
  // rounded up to the vector width -- buffers are assumed padded.
  float norm(const T *a, unsigned size) const {
    float result = 0;
#ifdef __GNUC__
#ifdef __AVX__
#define AVX_L2NORM(addr, dest, tmp) \
  tmp = _mm256_loadu_ps(addr);      \
  tmp = _mm256_mul_ps(tmp, tmp);    \
  dest = _mm256_add_ps(dest, tmp);
    __m256 sum;
    __m256 l0, l1;
    unsigned D = (size + 7) & ~7U;  // size rounded up to a multiple of 8
    unsigned DR = D % 16;           // remainder (0 or 8), handled up front
    unsigned DD = D - DR;           // bulk part, a multiple of 16 floats
    const float *l = (float *) a;
    const float *e_l = l + DD;
    float unpack[8] __attribute__((aligned(32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_loadu_ps(unpack);
    if (DR) {
      AVX_L2NORM(e_l, sum, l0);
    }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
      AVX_L2NORM(l, sum, l0);
      AVX_L2NORM(l + 8, sum, l1);
    }
    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] +
             unpack[5] + unpack[6] + unpack[7];
#else
#ifdef __SSE2__
// BUGFIX: the original macro used the nonexistent _mm128_* intrinsic names;
// the 128-bit SSE intrinsics are spelled _mm_*, so this branch previously
// failed to compile.
#define SSE_L2NORM(addr, dest, tmp) \
  tmp = _mm_loadu_ps(addr);         \
  tmp = _mm_mul_ps(tmp, tmp);       \
  dest = _mm_add_ps(dest, tmp);
    __m128 sum;
    __m128 l0, l1, l2, l3;
    unsigned D = (size + 3) & ~3U;  // size rounded up to a multiple of 4
    unsigned DR = D % 16;           // remainder (0, 4, 8, or 12)
    unsigned DD = D - DR;           // bulk part, a multiple of 16 floats
    const float *l = (float *) a;
    const float *e_l = l + DD;
    float unpack[4] __attribute__((aligned(16))) = {0, 0, 0, 0};
    sum = _mm_load_ps(unpack);
    switch (DR) {
      case 12:
        SSE_L2NORM(e_l + 8, sum, l2);
        /* fall through */
      case 8:
        SSE_L2NORM(e_l + 4, sum, l1);
        /* fall through */
      case 4:
        SSE_L2NORM(e_l, sum, l0);
        /* fall through */
      default:
        break;
    }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
      SSE_L2NORM(l, sum, l0);
      SSE_L2NORM(l + 4, sum, l1);
      SSE_L2NORM(l + 8, sum, l2);
      SSE_L2NORM(l + 12, sum, l3);
    }
    _mm_storeu_ps(unpack, sum);
    result += unpack[0] + unpack[1] + unpack[2] + unpack[3];
#else
    // Portable scalar fallback, unrolled by 4.
    float dot0, dot1, dot2, dot3;
    const float *last = a + size;
    const float *unroll_group = last - 3;
    /* Process 4 items with each loop for efficiency. */
    while (a < unroll_group) {
      dot0 = a[0] * a[0];
      dot1 = a[1] * a[1];
      dot2 = a[2] * a[2];
      dot3 = a[3] * a[3];
      result += dot0 + dot1 + dot2 + dot3;
      a += 4;
    }
    /* Process last 0-3 elements. Not needed for standard vector lengths. */
    while (a < last) {
      result += (*a) * (*a);
      a++;
    }
#endif
#endif
#endif
    return result;
  }
  using DistanceInnerProduct<T>::compare;
  // Partial squared L2 distance given the precomputed norm of b:
  //   result = norm - 2<a,b>
  // (the caller adds ||a||^2 if the true squared distance is needed; it is
  // constant per query when ranking neighbors).
  float compare(const T *a, const T *b, float norm, unsigned size) const {
    float result = -2 * DistanceInnerProduct<T>::inner_product(a, b, size);
    result += norm;
    return result;
  }
};
} // namespace diskann
|
GB_unaryop__minv_fp32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_uint32
// op(A') function: GB_tran__minv_fp32_uint32
// C type: float
// A type: uint32_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__minv_fp32_uint32: Cx [p] = (1.0F) / (float) Ax [p], for
// p = 0..anz-1 (multiplicative inverse with a uint32 -> float typecast);
// the cast and op are expanded by the GB_CAST_OP macro defined above.
GrB_Info GB_unop__minv_fp32_uint32
(
float *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this kernel was compiled out via the GxB_NO_* controls (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ; // Cx [p] = (1.0F) / (float) Ax [p]
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__minv_fp32_uint32: C = minv (cast (A')) — transpose A, typecast
// each entry from uint32_t to float, and apply the multiplicative-inverse
// op.  The actual loops live in the shared template GB_unaryop_transpose.c
// (phase 2 of 2), which consumes the GB_* macros defined above.
GrB_Info GB_tran__minv_fp32_uint32
(
GrB_Matrix C,                              // output matrix
const GrB_Matrix A,                        // input matrix, logically transposed
int64_t *GB_RESTRICT *Rowcounts,           // workspace used by the template
GBI_single_iterator Iter,                  // iterator over A's vectors
const int64_t *GB_RESTRICT A_slice,        // slice boundaries for parallelism
int naslice                                // number of slices
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* timeval_subtract: store *x - *y into *result.
 *
 * Follows the classic GNU C Library manual example: *y is used as scratch
 * space and may be normalized (modified) by the call.
 *
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from y's seconds field when x has fewer microseconds than y. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry into y's seconds field when the microsecond gap exceeds 1 s. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative overall iff x's (adjusted) seconds are smaller. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-2, 3-D 25-point stencil (PLUTO/Pochoir benchmark).
 * Usage: prog Nx Ny Nz Nt  (interior sizes; a halo of 8 is added to each
 * spatial dimension).  Runs the tiled stencil TESTS times and reports the
 * per-run and minimum wall-clock times. */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* BUGFIX: Nx/Ny/Nz/Nt were read uninitialized (undefined behavior) when
   * fewer than four arguments were supplied; fail fast instead. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1]) + 8; /* +8 = halo of 4 ghost layers per side */
  Ny = atoi(argv[2]) + 8;
  Nz = atoi(argv[3]) + 8;
  Nt = atoi(argv[4]);

  /* A[0]/A[1] are the two time planes of the grid; roc2 is the coefficient
   * field.  BUGFIX: roc2 was malloc'd twice in the original (the first
   * sizeof(double**) block leaked). */
  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  double ***roc2 = (double ***) malloc(sizeof(double **) * Nz);
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    roc2[i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
      roc2[i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Initialize the grids.  BUGFIX: the loops now start at 0 (previously 1)
   * and A[1] is initialized too -- the stencil reads plane index 0 and the
   * A[1] buffer on the first time step, which previously held indeterminate
   * values (undefined behavior). */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil coefficients: center plus 4 shells in each direction. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=Nt-1;t1++) {
    lbp=ceild(t1+1,2);
    ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) {
        for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(24*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(24*t3+Nx+11,256));t4++) {
          for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),64*t4+62);t5++) {
            for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                lbv=max(256*t4,4*t5+4);
                ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return; /* sign flag unused; timing read from `result` */

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays.  BUGFIX: the top-level A pointer block and the
   * tile_size list were previously leaked. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
rose_complexCondition.c | // Contributed by Jeff Keasler
// 5/24/2010
#include "omp.h"
// ROSE/OpenMP lowering test case: copies c[0 .. 3*numAB^2 - 1] into bufLoc
// via a parallelized loop whose header is wrapped in the #if 0/#else
// structure under test (the loop's closing brace sits outside the #endif).
// NOTE(review): c and bufLoc are dereferenced while uninitialized -- this
// function is a compiler-transformation fixture and must not actually be
// executed (undefined behavior at runtime).
void goo(int numAB)
{
double *c;       // fixture input: intentionally left uninitialized
double *bufLoc;  // fixture output: intentionally left uninitialized
int k_nom_22;
#if 0
#else
#pragma omp parallel for private (k_nom_22) firstprivate (numAB)
for (k_nom_22 = 0; k_nom_22 <= numAB * numAB * 3 - 1; k_nom_22 += 1) {
#endif
bufLoc[k_nom_22] = c[k_nom_22];
}
return ;
}
|
pairwise3.c | /* Generated by Cython 0.25.2 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"extra_compile_args": [
"-Wno-unused-function",
"-Wno-maybe-uninitialized",
"-O3",
"-ffast-math",
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
]
},
"module_name": "pairwise3"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_25_2"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000)
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
/* Fallback NaN constructor, used only when <math.h> does not provide the
   NAN macro.  A float whose bytes are all 0xFF has an all-ones exponent
   and a nonzero mantissa, which IEEE 754 defines as a (quiet) NaN. */
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__pairwise3
#define __PYX_HAVE_API__pairwise3
#include <math.h>
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER) && defined (_M_X64)
#define __Pyx_sst_abs(value) _abs64(value)
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
/* Python 2 replacement for Py_UNICODE_strlen(): returns the number of
   Py_UNICODE code units before the terminating NUL (strlen() semantics
   for wide-character buffers). */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
    size_t length = 0;
    while (u[length] != 0)
        length++;
    return length;
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Module-init helper (c_string_encoding=ascii builds only): checks whether
   sys.getdefaultencoding() is ASCII, and if not, verifies that the default
   encoding is at least a superset of ASCII by round-tripping all 128 ASCII
   bytes through it.  Sets __Pyx_sys_getdefaultencoding_not_ascii as a side
   effect.  Returns 0 on success, -1 with a Python exception set on error. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
/* Borrowed char* valid only while default_encoding is alive. */
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
/* Decode bytes 0..127 to unicode, re-encode with the default encoding,
   and require a byte-identical result: that proves the default encoding
   is an ASCII superset. */
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
/* Error path: release whatever was acquired (XDECREF tolerates NULL). */
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/* Module-init helper (c_string_encoding=default builds only): queries
   sys.getdefaultencoding() and stores a private heap-allocated copy of the
   encoding name in __PYX_DEFAULT_STRING_ENCODING for later C-string <->
   unicode conversions (the copy intentionally lives for the process
   lifetime).  Returns 0 on success, -1 on error (Python exception set,
   except on malloc failure). */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
/* Borrowed char* valid only while default_encoding is alive. */
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
/* BUG FIX: allocate strlen()+1 bytes.  The original allocated only
   strlen() bytes, so the strcpy() below wrote its terminating NUL one
   byte past the end of the heap buffer. */
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"pairwise3.pyx",
"stringsource",
};
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
/* A C-level view of a typed memoryview slice: raw data pointer plus
   per-dimension layout, with a backreference for refcounting. */
typedef struct {
struct __pyx_memoryview_obj *memview; /* owning memoryview object (refcounted) */
char *data;                           /* pointer to the first element of the slice */
Py_ssize_t shape[8];                  /* extent per dimension (max 8 dims) */
Py_ssize_t strides[8];                /* byte stride per dimension */
Py_ssize_t suboffsets[8];             /* PEP-3118 suboffsets; negative when unused */
} __Pyx_memviewslice;
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Static description of a C type as seen by the PEP-3118 buffer-format
   checker (one per dtype used by the module's buffers/memoryviews). */
typedef struct {
const char* name;                  /* human-readable type name for error messages */
struct __Pyx_StructField_* fields; /* member list for struct dtypes, NULL otherwise */
size_t size;                       /* sizeof the C type */
size_t arraysize[8];               /* fixed array extents, when the dtype is an array */
int ndim;                          /* number of fixed array dimensions */
char typegroup;                    /* format-character class (int/uint/float/...) */
char is_unsigned;
int flags;                         /* e.g. __PYX_BUF_FLAGS_PACKED_STRUCT */
} __Pyx_TypeInfo;
/* One member of a struct dtype. */
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;                     /* byte offset within the parent struct */
} __Pyx_StructField;
/* Stack frame used while recursing into nested struct dtypes. */
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
/* Running state of the buffer-format string parser
   (__Pyx_BufFmt_CheckString walks a PEP-3118 format string against
   the expected __Pyx_TypeInfo). */
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;                 /* current byte offset in the format being parsed */
size_t new_count, enc_count;       /* pending / consumed repeat counts */
size_t struct_alignment;
int is_complex;
char enc_type;                     /* last format character seen */
char new_packmode;                 /* '@', '=', '<', '>' packing mode */
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "View.MemoryView":103
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
/* Object layout of the generated `array` extension type (an owned,
   contiguous buffer that memoryviews can wrap).  Field order is part of
   the object's binary layout — do not reorder. */
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab; /* cdef-method vtable */
char *data;                                /* underlying buffer */
Py_ssize_t len;                            /* total buffer size in bytes */
char *format;                              /* PEP-3118 format string */
int ndim;
Py_ssize_t *_shape;                        /* _shape/_strides point into one allocation */
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;                            /* 'c' or 'fortran' contiguity mode */
PyObject *_format;                         /* Python-level owner of the format bytes */
void (*callback_free_data)(void *);        /* optional custom deallocator for data */
int free_data;                             /* nonzero: free data on dealloc */
int dtype_is_object;                       /* nonzero: elements hold PyObject* refs */
};
/* "View.MemoryView":275
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":326
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* Object layout of the generated `memoryview` extension type: wraps a
   Py_buffer view of `obj` plus bookkeeping for slice refcounting.
   Field order is part of the object's binary layout — do not reorder. */
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab; /* cdef-method vtable */
PyObject *obj;                                  /* object the buffer was acquired from */
PyObject *_size;                                /* cached size (lazily computed) */
PyObject *_array_interface;                     /* cached __array_interface__ dict */
PyThread_type_lock lock;                        /* guards acquisition_count when atomics unavailable */
__pyx_atomic_int acquisition_count[2];          /* storage; one slot chosen for alignment */
__pyx_atomic_int *acquisition_count_aligned_p;  /* aligned pointer into the slot actually used */
Py_buffer view;                                 /* the acquired PEP-3118 buffer */
int flags;                                      /* buffer-acquisition flags */
int dtype_is_object;                            /* nonzero: elements hold PyObject* refs */
__Pyx_TypeInfo *typeinfo;                       /* expected dtype, for format checking */
};
/* "View.MemoryView":951
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* Object layout of `_memoryviewslice` (subclass of memoryview): carries
   the C-level slice plus converters for element access from Python. */
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;   /* base class must come first */
__Pyx_memviewslice from_slice;            /* the C slice this object exposes */
PyObject *from_object;                    /* original object the slice was taken from */
PyObject *(*to_object_func)(char *);      /* element -> Python object converter */
int (*to_dtype_func)(char *, PyObject *); /* Python object -> element converter */
};
/* "View.MemoryView":103
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":326
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* vtable of cdef methods for the memoryview type; populated at module
   init and shared by all instances via __pyx_vtabptr_memoryview. */
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":951
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
/* Attribute lookup that dispatches straight through the type's getattr
   slots, bypassing PyObject_GetAttr's extra checks when possible.
   Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
/* Python 2 only: legacy types may implement the char*-based tp_getattr
   slot instead; attr_name is assumed to be an interned str here. */
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
/* Neither slot set: fall back to the generic lookup. */
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* BufferFormatCheck.proto */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type); // PROTO
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET();
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
/* Fast-path list append used inside generated list comprehensions.
 * When the list still has spare capacity (allocated > len) the item is
 * stored directly into the item array and ob_size is bumped, skipping
 * the overhead of a PyList_Append call; otherwise it falls back to the
 * public API.  Returns 0 on success, -1 on error (from PyList_Append).
 * NOTE(review): `Py_SIZE(list) = len+1` assigns through the Py_SIZE
 * macro, which is only an lvalue on older CPython versions (pre-3.9
 * style) -- confirm the target CPython before upgrading this file. */
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);                  /* the list takes a new strong reference */
        PyList_SET_ITEM(list, len, x); /* raw slot store, no bounds/overwrite check */
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);     /* slow path: let CPython grow the list */
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
/* Extend list L with the contents of iterable v; returns 0 on success,
 * -1 with an exception set on failure.
 * On CPython this calls the private _PyList_Extend (which returns a new
 * reference to None on success, so it must be DECREF'd); elsewhere it
 * uses the portable slice-assignment equivalent L[len(L):len(L)] = v. */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject* none = _PyList_Extend((PyListObject*)L, v);
    if (unlikely(!none))
        return -1;
    Py_DECREF(none);                   /* drop the returned None reference */
    return 0;
#else
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
/* Append x to list, bypassing PyList_Append when it is safe and useful.
 * The fast path requires BOTH spare capacity (allocated > len) AND the
 * list being more than half full (len > allocated >> 1); presumably the
 * second test routes sparsely-filled lists through PyList_Append so
 * CPython's resize logic can trim over-allocation -- confirm against
 * Cython upstream.  The single `&` (not `&&`) is intentional: both
 * likely() results are 0/1 ints and combining them branchlessly avoids a
 * second conditional jump.  Returns 0 on success, -1 on error. */
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);                  /* the list takes a new strong reference */
        PyList_SET_ITEM(list, len, x); /* raw slot store, no bounds/overwrite check */
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* WriteUnraisableException.proto */
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* None.proto */
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs,
char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* MemviewDtypeToObject.proto */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp);
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'libc.math' */
/* Module declarations from 'pairwise3' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static CYTHON_INLINE double __pyx_f_9pairwise3_euclidean_distance(__Pyx_memviewslice, int, int, int); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "pairwise3"
int __pyx_module_is_main_pairwise3 = 0;
/* Implementation of 'pairwise3' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_D[] = "D";
static const char __pyx_k_M[] = "M";
static const char __pyx_k_N[] = "N";
static const char __pyx_k_O[] = "O";
static const char __pyx_k_X[] = "X";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dist[] = "dist";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_float64[] = "float64";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pairwise3[] = "pairwise3";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Users_ethen_machine_learning_py[] = "/Users/ethen/machine-learning/python/cython/pairwise3.pyx";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_D;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_M;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_s_N;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_kp_s_Users_ethen_machine_learning_py;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_X;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_dist;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_float64;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pairwise3;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_9pairwise3_pairwise3(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__10;
static PyObject *__pyx_slice__11;
static PyObject *__pyx_slice__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_codeobj__15;
/* "pairwise3.pyx":16
* # to call a "GIL-less" function, we place nogil after it;
* # note that we can't interact with python objects inside
* cdef inline double euclidean_distance(double[:, :] X, int i, int j, int N) nogil: # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible
*/
static CYTHON_INLINE double __pyx_f_9pairwise3_euclidean_distance(__Pyx_memviewslice __pyx_v_X, int __pyx_v_i, int __pyx_v_j, int __pyx_v_N) {
  /* Euclidean distance between rows i and j of the 2-D double memoryview X,
   * taken over the first N columns: sqrt(sum_k (X[i,k] - X[j,k])^2).
   * Generated from a `nogil` cdef function, so it touches no Python
   * objects -- only raw strided memory reached through X.data/X.strides. */
  const char *base = __pyx_v_X.data;
  const Py_ssize_t row_stride = __pyx_v_X.strides[0];
  const Py_ssize_t col_stride = __pyx_v_X.strides[1];
  /* Row base pointers are loop-invariant; hoist them out of the k loop. */
  const char *row_i = base + (Py_ssize_t)__pyx_v_i * row_stride;
  const char *row_j = base + (Py_ssize_t)__pyx_v_j * row_stride;
  double acc = 0.0;
  int k;
  for (k = 0; k < __pyx_v_N; k++) {
    const Py_ssize_t off = (Py_ssize_t)k * col_stride;
    /* Strided element loads: X[i,k] and X[j,k]. */
    const double diff = (*(const double *)(row_i + off)) - (*(const double *)(row_j + off));
    acc += diff * diff;
  }
  return sqrt(acc);
}
/* "pairwise3.pyx":30
 * return sqrt(d)
 *
 * def pairwise3(double[:, :] X): # <<<<<<<<<<<<<<
 *
 * cdef:
 */
/* Python wrapper */
/* CPython entry point for pairwise3(): a METH_O function that coerces its
 * single argument to a 2-D strided double memoryview slice and delegates to
 * the implementation function __pyx_pf_9pairwise3_pairwise3.
 * Auto-generated by Cython; the RefNanny setup/finish pairing and the
 * goto-based error path must stay in this exact order. */
static PyObject *__pyx_pw_9pairwise3_1pairwise3(PyObject *__pyx_self, PyObject *__pyx_arg_X); /*proto*/
static PyMethodDef __pyx_mdef_9pairwise3_1pairwise3 = {"pairwise3", (PyCFunction)__pyx_pw_9pairwise3_1pairwise3, METH_O, 0};
static PyObject *__pyx_pw_9pairwise3_1pairwise3(PyObject *__pyx_self, PyObject *__pyx_arg_X) {
__Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("pairwise3 (wrapper)", 0);
assert(__pyx_arg_X); {
/* Coerce to double[:, :]; on failure .memview is NULL and a Python
 * exception has already been set by the converter. */
__pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_arg_X); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 30, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
__Pyx_AddTraceback("pairwise3.pairwise3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* The impl function takes ownership of the memoryview slice in __pyx_v_X. */
__pyx_r = __pyx_pf_9pairwise3_pairwise3(__pyx_self, __pyx_v_X);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of pairwise3(X): builds an M x M distance matrix
 * D = np.zeros((M, M)) and fills D[i, j] = D[j, i] =
 * euclidean_distance(X, i, j, N), parallelising the outer i-loop with
 * OpenMP (prange) while the GIL is released.
 * Auto-generated by Cython — the refcount bookkeeping (GOTREF/GIVEREF/
 * DECREF) and the goto error ladder are order-sensitive; do not reorder. */
static PyObject *__pyx_pf_9pairwise3_pairwise3(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X) {
int __pyx_v_i;
int __pyx_v_j;
double __pyx_v_dist;
int __pyx_v_M;
int __pyx_v_N;
__Pyx_memviewslice __pyx_v_D = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
__Pyx_RefNannySetupContext("pairwise3", 0);
/* "pairwise3.pyx":35
 * int i, j
 * double dist
 * int M = X.shape[0], N = X.shape[1] # <<<<<<<<<<<<<<
 * double[:, :] D = np.zeros((M, M), dtype = np.float64)
 *
 */
__pyx_v_M = (__pyx_v_X.shape[0]);
__pyx_v_N = (__pyx_v_X.shape[1]);
/* "pairwise3.pyx":36
 * double dist
 * int M = X.shape[0], N = X.shape[1]
 * double[:, :] D = np.zeros((M, M), dtype = np.float64) # <<<<<<<<<<<<<<
 *
 * # parallelize this over the outermost loop, using the prange function
 */
/* Builds the call np.zeros((M, M), dtype=np.float64) object by object:
 * look up np.zeros, pack (M, M) into a tuple, build the kwargs dict. */
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* Coerce the ndarray returned by np.zeros to a double[:, :] slice;
 * the slice keeps a reference to the array via its .memview. */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_5);
if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_D = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "pairwise3.pyx":39
 *
 * # parallelize this over the outermost loop, using the prange function
 * with nogil, parallel(): # <<<<<<<<<<<<<<
 * for i in prange(M):
 * for j in range(M):
 */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_7, __pyx_t_8, __pyx_t_9)
#endif /* _OPENMP */
{
/* "pairwise3.pyx":40
 * # parallelize this over the outermost loop, using the prange function
 * with nogil, parallel():
 * for i in prange(M): # <<<<<<<<<<<<<<
 * for j in range(M):
 * dist = euclidean_distance(X, i, j, N)
 */
__pyx_t_7 = __pyx_v_M;
/* Step-validity guard for prange (step == 1 here, so it folds away). */
if (1 == 0) abort();
{
/* Iteration count for prange(M) with start 0, step 1. */
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_dist) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_dist = ((double)__PYX_NAN());
__pyx_v_j = ((int)0xbad0bad0);
/* "pairwise3.pyx":41
 * with nogil, parallel():
 * for i in prange(M):
 * for j in range(M): # <<<<<<<<<<<<<<
 * dist = euclidean_distance(X, i, j, N)
 * D[i, j] = dist
 */
__pyx_t_10 = __pyx_v_M;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "pairwise3.pyx":42
 * for i in prange(M):
 * for j in range(M):
 * dist = euclidean_distance(X, i, j, N) # <<<<<<<<<<<<<<
 * D[i, j] = dist
 * D[j, i] = dist
 */
__pyx_v_dist = __pyx_f_9pairwise3_euclidean_distance(__pyx_v_X, __pyx_v_i, __pyx_v_j, __pyx_v_N);
/* "pairwise3.pyx":43
 * for j in range(M):
 * dist = euclidean_distance(X, i, j, N)
 * D[i, j] = dist # <<<<<<<<<<<<<<
 * D[j, i] = dist
 *
 */
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_j;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_D.data + __pyx_t_12 * __pyx_v_D.strides[0]) ) + __pyx_t_13 * __pyx_v_D.strides[1]) )) = __pyx_v_dist;
/* "pairwise3.pyx":44
 * dist = euclidean_distance(X, i, j, N)
 * D[i, j] = dist
 * D[j, i] = dist # <<<<<<<<<<<<<<
 *
 * return D
 */
/* NOTE(review): D[j, i] is also written by the thread owning outer index
 * j; both writes store the identical value, so the overlap is benign. */
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_i;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_D.data + __pyx_t_14 * __pyx_v_D.strides[0]) ) + __pyx_t_15 * __pyx_v_D.strides[1]) )) = __pyx_v_dist;
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "pairwise3.pyx":39
 *
 * # parallelize this over the outermost loop, using the prange function
 * with nogil, parallel(): # <<<<<<<<<<<<<<
 * for i in prange(M):
 * for j in range(M):
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "pairwise3.pyx":46
 * D[j, i] = dist
 *
 * return D # <<<<<<<<<<<<<<
 *
 */
__Pyx_XDECREF(__pyx_r);
/* Wrap the D slice back into a Python memoryview object for the caller.
 * (The stray double ';' below is a harmless empty statement.) */
__pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_D, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "pairwise3.pyx":30
 * return sqrt(d)
 *
 * def pairwise3(double[:, :] X): # <<<<<<<<<<<<<<
 *
 * cdef:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("pairwise3.pairwise3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Release the argument slice and the local D slice on every exit path. */
__PYX_XDEC_MEMVIEW(&__pyx_v_X, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_D, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":120
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */
/* Python wrapper */
/* Argument-unpacking wrapper for cython.array.__cinit__: parses
 * (shape, itemsize, format, mode="c", allocate_buffer=True) from
 * positional args and keywords, type-checks shape (tuple) and format
 * (not None), then delegates to the __cinit__ implementation.
 * The case-without-break switches below intentionally fall through:
 * handling N positional args implies handling all earlier ones. */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
/* Default for mode is the interned string "c". */
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Copy positional args; each case falls through to collect the rest. */
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
/* fall through */
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
/* fall through */
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
/* fall through */
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
/* fall through */
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
/* fall through */
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill remaining slots from keywords; cases fall through so every
 * argument after the last positional one is considered. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
/* fall through */
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error)
}
/* fall through */
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error)
}
/* fall through */
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
/* fall through */
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
/* Any keywords left over are unexpected/duplicate — raise. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 120, __pyx_L3_error)
}
} else {
/* No keywords: 3 to 5 positional args allowed (fallthrough again). */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
/* fall through */
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
/* fall through */
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error)
} else {
/* "View.MemoryView":121
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
 *
 * cdef int idx
 */
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
/* Enforce 'tuple shape' and 'format not None' from the .pyx signature. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":120
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cython.array.__cinit__: validates shape/itemsize/
 * format/mode, allocates the shape+strides bookkeeping (one
 * PyObject_Malloc block: _shape followed by _strides), computes
 * contiguous strides for the requested order ('C' or 'F'), and — when
 * allocate_buffer is true — malloc()s self.data (owned by the object,
 * see free_data) and, for object dtype, fills it with Py_None refs.
 * Returns 0 on success, -1 with a Python exception on failure.
 * Auto-generated by Cython; refcount macros are order-sensitive. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* format may be rebound below (encode), so hold our own reference. */
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":127
 * cdef PyObject **p
 *
 * self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
 * self.itemsize = itemsize
 *
 */
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 127, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(1, 127, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":128
 *
 * self.ndim = <int> len(shape)
 * self.itemsize = itemsize # <<<<<<<<<<<<<<
 *
 * if not self.ndim:
 */
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":130
 * self.itemsize = itemsize
 *
 * if not self.ndim: # <<<<<<<<<<<<<<
 * raise ValueError("Empty shape tuple for cython.array")
 *
 */
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":131
 *
 * if not self.ndim:
 * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
 *
 * if itemsize <= 0:
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 131, __pyx_L1_error)
/* "View.MemoryView":130
 * self.itemsize = itemsize
 *
 * if not self.ndim: # <<<<<<<<<<<<<<
 * raise ValueError("Empty shape tuple for cython.array")
 *
 */
}
/* "View.MemoryView":133
 * raise ValueError("Empty shape tuple for cython.array")
 *
 * if itemsize <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 */
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":134
 *
 * if itemsize <= 0:
 * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
 *
 * if not isinstance(format, bytes):
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 134, __pyx_L1_error)
/* "View.MemoryView":133
 * raise ValueError("Empty shape tuple for cython.array")
 *
 * if itemsize <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 */
}
/* "View.MemoryView":136
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 */
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":137
 *
 * if not isinstance(format, bytes):
 * format = format.encode('ASCII') # <<<<<<<<<<<<<<
 * self._format = format # keep a reference to the byte string
 * self.format = self._format
 */
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":136
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 */
}
/* "View.MemoryView":138
 * if not isinstance(format, bytes):
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
 * self.format = self._format
 *
 */
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error)
__pyx_t_5 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":139
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 * self.format = self._format # <<<<<<<<<<<<<<
 *
 *
 */
/* self.format (char *) borrows the bytes buffer kept alive by _format. */
__pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 139, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_6;
/* "View.MemoryView":142
 *
 *
 * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
 * self._strides = self._shape + self.ndim
 *
 */
/* One allocation holds both arrays: shape in the first ndim slots,
 * strides in the second ndim slots. */
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":143
 *
 * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
 * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
 *
 * if not self._shape:
 */
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":145
 * self._strides = self._shape + self.ndim
 *
 * if not self._shape: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate shape and strides.")
 *
 */
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":146
 *
 * if not self._shape:
 * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 146, __pyx_L1_error)
/* "View.MemoryView":145
 * self._strides = self._shape + self.ndim
 *
 * if not self._shape: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate shape and strides.")
 *
 */
}
/* "View.MemoryView":149
 *
 *
 * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 */
/* Validate each dimension is > 0 and copy it into self._shape. */
__pyx_t_7 = 0;
__pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_dim = __pyx_t_8;
__pyx_v_idx = __pyx_t_7;
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":150
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":151
 * for idx, dim in enumerate(shape):
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
 * self._shape[idx] = dim
 *
 */
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9);
__pyx_t_3 = 0;
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_Raise(__pyx_t_9, 0, 0, 0);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__PYX_ERR(1, 151, __pyx_L1_error)
/* "View.MemoryView":150
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
}
/* "View.MemoryView":152
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim # <<<<<<<<<<<<<<
 *
 * cdef char order
 */
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":149
 *
 *
 * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 */
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "View.MemoryView":155
 *
 * cdef char order
 * if mode == 'fortran': # <<<<<<<<<<<<<<
 * order = b'F'
 * self.mode = u'fortran'
 */
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":156
 * cdef char order
 * if mode == 'fortran':
 * order = b'F' # <<<<<<<<<<<<<<
 * self.mode = u'fortran'
 * elif mode == 'c':
 */
__pyx_v_order = 'F';
/* "View.MemoryView":157
 * if mode == 'fortran':
 * order = b'F'
 * self.mode = u'fortran' # <<<<<<<<<<<<<<
 * elif mode == 'c':
 * order = b'C'
 */
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":155
 *
 * cdef char order
 * if mode == 'fortran': # <<<<<<<<<<<<<<
 * order = b'F'
 * self.mode = u'fortran'
 */
goto __pyx_L10;
}
/* "View.MemoryView":158
 * order = b'F'
 * self.mode = u'fortran'
 * elif mode == 'c': # <<<<<<<<<<<<<<
 * order = b'C'
 * self.mode = u'c'
 */
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":159
 * self.mode = u'fortran'
 * elif mode == 'c':
 * order = b'C' # <<<<<<<<<<<<<<
 * self.mode = u'c'
 * else:
 */
__pyx_v_order = 'C';
/* "View.MemoryView":160
 * elif mode == 'c':
 * order = b'C'
 * self.mode = u'c' # <<<<<<<<<<<<<<
 * else:
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
 */
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":158
 * order = b'F'
 * self.mode = u'fortran'
 * elif mode == 'c': # <<<<<<<<<<<<<<
 * order = b'C'
 * self.mode = u'c'
 */
goto __pyx_L10;
}
/* "View.MemoryView":162
 * self.mode = u'c'
 * else:
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
 *
 * self.len = fill_contig_strides_array(self._shape, self._strides,
 */
/*else*/ {
__pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 162, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":164
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
 *
 * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
 * itemsize, self.ndim, order)
 *
 */
/* Computes contiguous strides for 'order' and returns total byte length. */
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":167
 * itemsize, self.ndim, order)
 *
 * self.free_data = allocate_buffer # <<<<<<<<<<<<<<
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer:
 */
/* If we allocate the buffer we also own (and later free) it. */
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":168
 *
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
 * if allocate_buffer:
 *
 */
__pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":169
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer: # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":172
 *
 *
 * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
 * if not self.data:
 * raise MemoryError("unable to allocate array data.")
 */
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":173
 *
 * self.data = <char *>malloc(self.len)
 * if not self.data: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate array data.")
 *
 */
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
 * self.data = <char *>malloc(self.len)
 * if not self.data:
 * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
 *
 * if self.dtype_is_object:
 */
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 174, __pyx_L1_error)
/* "View.MemoryView":173
 *
 * self.data = <char *>malloc(self.len)
 * if not self.data: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate array data.")
 *
 */
}
/* "View.MemoryView":176
 * raise MemoryError("unable to allocate array data.")
 *
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 */
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":177
 *
 * if self.dtype_is_object:
 * p = <PyObject **> self.data # <<<<<<<<<<<<<<
 * for i in range(self.len / itemsize):
 * p[i] = Py_None
 */
/* Object buffers must hold valid references; seed every slot with
 * Py_None (one INCREF per stored pointer). */
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":178
 * if self.dtype_is_object:
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
 * p[i] = Py_None
 * Py_INCREF(Py_None)
 */
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 178, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 178, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "View.MemoryView":179
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 * p[i] = Py_None # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":180
 * for i in range(self.len / itemsize):
 * p[i] = Py_None
 * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
Py_INCREF(Py_None);
}
/* "View.MemoryView":176
 * raise MemoryError("unable to allocate array data.")
 *
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 */
}
/* "View.MemoryView":169
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer: # <<<<<<<<<<<<<<
 *
 *
 */
}
/* "View.MemoryView":120
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":183
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for array.__getbuffer__
 * (View.MemoryView:183).  Casts the untyped PyObject* self to the
 * concrete struct type and forwards to the typed implementation. */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getbuffer__ (View.MemoryView:183-205): services a
 * PEP 3118 buffer request against the Cython `array` object.  Rejects any
 * request that is not contiguous in the array's declared mode ("c" or
 * "fortran"), then fills info->buf/len/ndim/shape/strides/itemsize and sets
 * info->obj = self so the exporting object stays alive while the buffer is
 * held.  Returns 0 on success, -1 on error (with an exception set). */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the error path below can safely DECREF it. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":184
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1 # <<<<<<<<<<<<<<
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = -1;
/* "View.MemoryView":185
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1
 * if self.mode == u"c": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 */
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":186
 * cdef int bufmode = -1
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":185
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1
 * if self.mode == u"c": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 */
goto __pyx_L3;
}
/* "View.MemoryView":187
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 */
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":188
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 */
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 */
}
__pyx_L3:;
/* If the caller did not request a compatible contiguity mode, refuse.
 * Note: with an unknown mode string, bufmode stays -1 (all bits set), so
 * the test below always passes — that matches the original Cython source. */
/* "View.MemoryView":189
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode): # <<<<<<<<<<<<<<
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 */
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
 * info.buf = self.data
 * info.len = self.len
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 190, __pyx_L1_error)
/* "View.MemoryView":189
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode): # <<<<<<<<<<<<<<
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 */
}
/* Populate the Py_buffer struct from the array's own fields. */
/* "View.MemoryView":191
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data # <<<<<<<<<<<<<<
 * info.len = self.len
 * info.ndim = self.ndim
 */
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":192
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 * info.len = self.len # <<<<<<<<<<<<<<
 * info.ndim = self.ndim
 * info.shape = self._shape
 */
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":193
 * info.buf = self.data
 * info.len = self.len
 * info.ndim = self.ndim # <<<<<<<<<<<<<<
 * info.shape = self._shape
 * info.strides = self._strides
 */
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":194
 * info.len = self.len
 * info.ndim = self.ndim
 * info.shape = self._shape # <<<<<<<<<<<<<<
 * info.strides = self._strides
 * info.suboffsets = NULL
 */
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":195
 * info.ndim = self.ndim
 * info.shape = self._shape
 * info.strides = self._strides # <<<<<<<<<<<<<<
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize
 */
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":196
 * info.shape = self._shape
 * info.strides = self._strides
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 * info.itemsize = self.itemsize
 * info.readonly = 0
 */
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":197
 * info.strides = self._strides
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize # <<<<<<<<<<<<<<
 * info.readonly = 0
 *
 */
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":198
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize
 * info.readonly = 0 # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_FORMAT:
 */
__pyx_v_info->readonly = 0;
/* Only expose the format string when the consumer asked for it. */
/* "View.MemoryView":200
 * info.readonly = 0
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.format
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":201
 *
 * if flags & PyBUF_FORMAT:
 * info.format = self.format # <<<<<<<<<<<<<<
 * else:
 * info.format = NULL
 */
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":200
 * info.readonly = 0
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.format
 * else:
 */
goto __pyx_L5;
}
/* "View.MemoryView":203
 * info.format = self.format
 * else:
 * info.format = NULL # <<<<<<<<<<<<<<
 *
 * info.obj = self
 */
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* Replace the placeholder None in info->obj with a new reference to self. */
/* "View.MemoryView":205
 * info.format = NULL
 *
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":183
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 * cdef int bufmode = -1
 * if self.mode == u"c":
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
/* Error path: drop any temporary and clear the placeholder info->obj
 * reference so the caller does not see a half-filled buffer. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":209
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
/* Cython-generated tp_dealloc wrapper for the `array` class
 * (View.MemoryView:209): forwards to the typed implementation. */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of array.__dealloc__ (View.MemoryView:209-217).
 * Release order: (1) if a custom free callback was installed, delegate the
 * data buffer to it; (2) otherwise, if the array owns its buffer
 * (free_data), first DECREF every contained object for object dtypes, then
 * free() the buffer; (3) always free the _shape allocation (which also
 * holds _strides — allocated as one block elsewhere in this file). */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":210
 *
 * def __dealloc__(array self):
 * if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
 * self.callback_free_data(self.data)
 * elif self.free_data:
 */
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":211
 * def __dealloc__(array self):
 * if self.callback_free_data != NULL:
 * self.callback_free_data(self.data) # <<<<<<<<<<<<<<
 * elif self.free_data:
 * if self.dtype_is_object:
 */
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":210
 *
 * def __dealloc__(array self):
 * if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
 * self.callback_free_data(self.data)
 * elif self.free_data:
 */
goto __pyx_L3;
}
/* "View.MemoryView":212
 * if self.callback_free_data != NULL:
 * self.callback_free_data(self.data)
 * elif self.free_data: # <<<<<<<<<<<<<<
 * if self.dtype_is_object:
 * refcount_objects_in_slice(self.data, self._shape,
 */
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
 * self.callback_free_data(self.data)
 * elif self.free_data:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * refcount_objects_in_slice(self.data, self._shape,
 * self._strides, self.ndim, False)
 */
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":214
 * elif self.free_data:
 * if self.dtype_is_object:
 * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
 * self._strides, self.ndim, False)
 * free(self.data)
 */
/* last arg False => DECREF (not INCREF) each PyObject* in the buffer */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":213
 * self.callback_free_data(self.data)
 * elif self.free_data:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * refcount_objects_in_slice(self.data, self._shape,
 * self._strides, self.ndim, False)
 */
}
/* "View.MemoryView":216
 * refcount_objects_in_slice(self.data, self._shape,
 * self._strides, self.ndim, False)
 * free(self.data) # <<<<<<<<<<<<<<
 * PyObject_Free(self._shape)
 *
 */
free(__pyx_v_self->data);
/* "View.MemoryView":212
 * if self.callback_free_data != NULL:
 * self.callback_free_data(self.data)
 * elif self.free_data: # <<<<<<<<<<<<<<
 * if self.dtype_is_object:
 * refcount_objects_in_slice(self.data, self._shape,
 */
}
__pyx_L3:;
/* "View.MemoryView":217
 * self._strides, self.ndim, False)
 * free(self.data)
 * PyObject_Free(self._shape) # <<<<<<<<<<<<<<
 *
 * @property
 */
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":209
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 *
 * def __dealloc__(array self): # <<<<<<<<<<<<<<
 * if self.callback_free_data != NULL:
 * self.callback_free_data(self.data)
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":220
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
/* Cython-generated property-getter wrapper for array.memview
 * (View.MemoryView:220): forwards to the typed implementation. */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of the array.memview property (View.MemoryView:220-221):
 * returns self.get_memview(), i.e. a fresh memoryview over this array.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":221
 * @property
 * def memview(self):
 * return self.get_memview() # <<<<<<<<<<<<<<
 *
 * @cname('get_memview')
 */
__Pyx_XDECREF(__pyx_r);
/* cdef method: dispatched through the class vtable, not attribute lookup */
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":220
 *
 * @property
 * def memview(self): # <<<<<<<<<<<<<<
 * return self.get_memview()
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":224
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* Implementation of cdef array.get_memview (View.MemoryView:224-226):
 * constructs and returns memoryview(self, flags, self.dtype_is_object)
 * with flags = PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":225
 * @cname('get_memview')
 * cdef get_memview(self):
 * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
 * return memoryview(self, flags, self.dtype_is_object)
 *
 */
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":226
 * cdef get_memview(self):
 * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
/* Box the int flags and bool dtype_is_object, then build the 3-tuple
 * (self, flags, dtype_is_object) to call the memoryview type with. */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":224
 *
 * @cname('get_memview')
 * cdef get_memview(self): # <<<<<<<<<<<<<<
 * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 * return memoryview(self, flags, self.dtype_is_object)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":229
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
/* Cython-generated tp_getattro wrapper for array.__getattr__
 * (View.MemoryView:229): forwards to the typed implementation. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getattr__ (View.MemoryView:229-230): delegates
 * unknown attribute lookups to the wrapping memoryview, i.e. returns
 * getattr(self.memview, attr).  New reference, or NULL on error. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":230
 *
 * def __getattr__(self, attr):
 * return getattr(self.memview, attr) # <<<<<<<<<<<<<<
 *
 * def __getitem__(self, item):
 */
__Pyx_XDECREF(__pyx_r);
/* First fetch self.memview (creates a memoryview), then look up attr on it. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 230, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 230, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":229
 *
 *
 * def __getattr__(self, attr): # <<<<<<<<<<<<<<
 * return getattr(self.memview, attr)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":232
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
/* Cython-generated mp_subscript wrapper for array.__getitem__
 * (View.MemoryView:232): forwards to the typed implementation. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getitem__ (View.MemoryView:232-233): delegates
 * subscripting to the wrapping memoryview, i.e. returns self.memview[item].
 * New reference, or NULL with an exception set on error. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":233
 *
 * def __getitem__(self, item):
 * return self.memview[item] # <<<<<<<<<<<<<<
 *
 * def __setitem__(self, item, value):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":232
 * return getattr(self.memview, attr)
 *
 * def __getitem__(self, item): # <<<<<<<<<<<<<<
 * return self.memview[item]
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":235
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
/* Cython-generated mp_ass_subscript wrapper for array.__setitem__
 * (View.MemoryView:235): forwards to the typed implementation. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__setitem__ (View.MemoryView:235-236): delegates
 * item assignment to the wrapping memoryview, i.e. performs
 * self.memview[item] = value.  Returns 0 on success, -1 on error. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":236
 *
 * def __setitem__(self, item, value):
 * self.memview[item] = value # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 236, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":235
 * return self.memview[item]
 *
 * def __setitem__(self, item, value): # <<<<<<<<<<<<<<
 * self.memview[item] = value
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":240
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* Implementation of cdef array_cwrapper (View.MemoryView:240-251), exposed
 * as __pyx_array_new: C-level constructor for the `array` class.
 * When `buf` is NULL a new buffer is allocated by array.__cinit__; when a
 * buffer is supplied, the array is built with allocate_buffer=False and
 * `buf` is installed directly (the array does NOT take ownership of it).
 * Returns a new reference, or NULL with an exception set on failure. */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":244
 * cdef array result
 *
 * if buf == NULL: # <<<<<<<<<<<<<<
 * result = array(shape, itemsize, format, mode.decode('ASCII'))
 * else:
 */
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":245
 *
 * if buf == NULL:
 * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
 * else:
 * result = array(shape, itemsize, format, mode.decode('ASCII'),
 */
/* Box the C arguments (itemsize -> int, format -> bytes, mode -> unicode)
 * and build the 4-tuple (shape, itemsize, format, mode) for the call. */
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":244
 * cdef array result
 *
 * if buf == NULL: # <<<<<<<<<<<<<<
 * result = array(shape, itemsize, format, mode.decode('ASCII'))
 * else:
 */
goto __pyx_L3;
}
/* "View.MemoryView":247
 * result = array(shape, itemsize, format, mode.decode('ASCII'))
 * else:
 * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
 * allocate_buffer=False)
 * result.data = buf
 */
/*else*/ {
/* Same boxing as above, but also pass allocate_buffer=False via kwargs
 * so the array does not allocate — we install the caller's buffer. */
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":248
 * else:
 * result = array(shape, itemsize, format, mode.decode('ASCII'),
 * allocate_buffer=False) # <<<<<<<<<<<<<<
 * result.data = buf
 *
 */
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 248, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 248, __pyx_L1_error)
/* "View.MemoryView":247
 * result = array(shape, itemsize, format, mode.decode('ASCII'))
 * else:
 * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
 * allocate_buffer=False)
 * result.data = buf
 */
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":249
 * result = array(shape, itemsize, format, mode.decode('ASCII'),
 * allocate_buffer=False)
 * result.data = buf # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":251
 * result.data = buf
 *
 * return result # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":240
 *
 * @cname("__pyx_array_new")
 * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
 * char *mode, char *buf):
 * cdef array result
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":277
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
/* Cython-generated wrapper for Enum.__init__ (View.MemoryView:277):
 * parses the single required argument `name` from *args/**kwargs
 * (intentional switch fallthrough handles each positional count),
 * then forwards to the typed implementation.  Returns -1 on bad args. */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
/* Slow path: keyword arguments were supplied. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
/* Any keywords left over are unexpected — report them. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 277, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 277, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of Enum.__init__ (View.MemoryView:277-278): stores `name`
 * on the instance (self.name = name) with the usual Cython refcount dance:
 * INCREF the new value, then swap it in and DECREF the old.  Returns 0. */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":278
 * cdef object name
 * def __init__(self, name):
 * self.name = name # <<<<<<<<<<<<<<
 * def __repr__(self):
 * return self.name
 */
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":277
 * cdef class Enum(object):
 * cdef object name
 * def __init__(self, name): # <<<<<<<<<<<<<<
 * self.name = name
 * def __repr__(self):
 */
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":279
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
/* Cython-generated tp_repr wrapper for Enum.__repr__
 * (View.MemoryView:279): forwards to the typed implementation. */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of Enum.__repr__ (View.MemoryView:279-280): returns
 * self.name as a new reference.  Cannot fail (no error label emitted). */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":280
 * self.name = name
 * def __repr__(self):
 * return self.name # <<<<<<<<<<<<<<
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":279
 * def __init__(self, name):
 * self.name = name
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return self.name
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":294
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* Generated from View.MemoryView:294 (cdef align_pointer, nogil):
 * round `memory` up to the next multiple of `alignment` and return the
 * aligned address.  A pointer already on the boundary is returned
 * unchanged.  Pure arithmetic — no Python API calls, safe without GIL. */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t addr = (Py_intptr_t) __pyx_v_memory;
/* cdivision(True) in the Cython source => plain C '%' semantics here;
 * mixed Py_intptr_t/size_t operands promote exactly as in the original. */
size_t remainder = (size_t) (addr % __pyx_v_alignment);
if (remainder > 0) {
/* Bump past the boundary by the shortfall. */
addr += (Py_intptr_t) (__pyx_v_alignment - remainder);
}
return (void *) addr;
}
/* "View.MemoryView":341
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
/* Python wrapper for memoryview.__cinit__(obj, flags, dtype_is_object=False)
 * ("View.MemoryView":341).  Unpacks positional and keyword arguments into
 * `values[]`, converts `flags` to a C int and `dtype_is_object` to a C
 * truth value, then calls the implementation function.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
/* borrowed references to the unpacked arguments: obj, flags, dtype_is_object */
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* cases fall through intentionally: case 3 also fills slots 1 and 0, etc. */
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* fill any slot not supplied positionally from the keyword dict;
 * again intentional fallthrough -- `obj` and `flags` are required,
 * `dtype_is_object` is optional */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 341, __pyx_L3_error)
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
/* any keyword left over is unknown/duplicate -> raise via the helper */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 341, __pyx_L3_error)
}
} else {
/* no keywords: pure positional unpack, 2 or 3 arguments accepted */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error)
} else {
/* default: dtype_is_object=False */
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 341, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__cinit__ ("View.MemoryView":341-366).
 * Stores `obj`, acquires its buffer via __Pyx_GetBuffer when appropriate,
 * assigns a PyThread lock (from a small preallocated pool when available,
 * otherwise freshly allocated), derives dtype_is_object, and initialises
 * the aligned acquisition-count pointer.  Returns 0 on success, -1 with
 * an exception set on failure. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":342
 *
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *     self.obj = obj             # <<<<<<<<<<<<<<
 *     self.flags = flags
 *     if type(self) is memoryview or obj is not None:
 */
/* reference swap: incref the new value before dropping the old slot value */
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":343
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *     self.obj = obj
 *     self.flags = flags             # <<<<<<<<<<<<<<
 *     if type(self) is memoryview or obj is not None:
 *         __Pyx_GetBuffer(obj, &self.view, flags)
 */
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":344
 *     self.obj = obj
 *     self.flags = flags
 *     if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 *         __Pyx_GetBuffer(obj, &self.view, flags)
 *         if <PyObject *> self.view.obj == NULL:
 */
/* short-circuit `or`: the second operand is only evaluated when the
 * exact-type check fails */
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":345
 *     self.flags = flags
 *     if type(self) is memoryview or obj is not None:
 *         __Pyx_GetBuffer(obj, &self.view, flags)             # <<<<<<<<<<<<<<
 *         if <PyObject *> self.view.obj == NULL:
 *             (<__pyx_buffer *> &self.view).obj = Py_None
 */
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 345, __pyx_L1_error)
/* "View.MemoryView":346
 *     if type(self) is memoryview or obj is not None:
 *         __Pyx_GetBuffer(obj, &self.view, flags)
 *         if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 *             (<__pyx_buffer *> &self.view).obj = Py_None
 *             Py_INCREF(Py_None)
 */
/* if the exporter left view.obj NULL, normalise it to an owned Py_None
 * so later PyBuffer_Release bookkeeping is uniform */
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":347
 *         __Pyx_GetBuffer(obj, &self.view, flags)
 *         if <PyObject *> self.view.obj == NULL:
 *             (<__pyx_buffer *> &self.view).obj = Py_None             # <<<<<<<<<<<<<<
 *             Py_INCREF(Py_None)
 *
 */
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":348
 *         if <PyObject *> self.view.obj == NULL:
 *             (<__pyx_buffer *> &self.view).obj = Py_None
 *             Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 *
 *     global __pyx_memoryview_thread_locks_used
 */
Py_INCREF(Py_None);
/* "View.MemoryView":346
 *     if type(self) is memoryview or obj is not None:
 *         __Pyx_GetBuffer(obj, &self.view, flags)
 *         if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 *             (<__pyx_buffer *> &self.view).obj = Py_None
 *             Py_INCREF(Py_None)
 */
}
/* "View.MemoryView":344
 *     self.obj = obj
 *     self.flags = flags
 *     if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 *         __Pyx_GetBuffer(obj, &self.view, flags)
 *         if <PyObject *> self.view.obj == NULL:
 */
}
/* "View.MemoryView":351
 *
 *     global __pyx_memoryview_thread_locks_used
 *     if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:             # <<<<<<<<<<<<<<
 *         self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *         __pyx_memoryview_thread_locks_used += 1
 */
/* 8 is the inlined value of THREAD_LOCKS_PREALLOCATED: reuse a lock
 * from the global pool when one is still free */
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":352
 *     global __pyx_memoryview_thread_locks_used
 *     if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
 *         self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]             # <<<<<<<<<<<<<<
 *         __pyx_memoryview_thread_locks_used += 1
 *         if self.lock is NULL:
 */
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":353
 *     if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
 *         self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *         __pyx_memoryview_thread_locks_used += 1             # <<<<<<<<<<<<<<
 *         if self.lock is NULL:
 *             self.lock = PyThread_allocate_lock()
 */
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":351
 *
 *     global __pyx_memoryview_thread_locks_used
 *     if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:             # <<<<<<<<<<<<<<
 *         self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *         __pyx_memoryview_thread_locks_used += 1
 */
}
/* "View.MemoryView":354
 *         self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *         __pyx_memoryview_thread_locks_used += 1
 *     if self.lock is NULL:             # <<<<<<<<<<<<<<
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock is NULL:
 */
/* pool exhausted (or pool slot was NULL): allocate a fresh lock */
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":355
 *         __pyx_memoryview_thread_locks_used += 1
 *     if self.lock is NULL:
 *         self.lock = PyThread_allocate_lock()             # <<<<<<<<<<<<<<
 *         if self.lock is NULL:
 *             raise MemoryError
 */
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":356
 *     if self.lock is NULL:
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock is NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError
 *
 */
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":357
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock is NULL:
 *             raise MemoryError             # <<<<<<<<<<<<<<
 *
 *     if flags & PyBUF_FORMAT:
 */
PyErr_NoMemory(); __PYX_ERR(1, 357, __pyx_L1_error)
/* "View.MemoryView":356
 *     if self.lock is NULL:
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock is NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError
 *
 */
}
/* "View.MemoryView":354
 *         self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 *         __pyx_memoryview_thread_locks_used += 1
 *     if self.lock is NULL:             # <<<<<<<<<<<<<<
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock is NULL:
 */
}
/* "View.MemoryView":359
 *             raise MemoryError
 *
 *     if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *         self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 *     else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":360
 *
 *     if flags & PyBUF_FORMAT:
 *         self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')             # <<<<<<<<<<<<<<
 *     else:
 *         self.dtype_is_object = dtype_is_object
 */
/* dtype is object iff the buffer format string is exactly "O";
 * the `and` short-circuits on the first character */
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":359
 *             raise MemoryError
 *
 *     if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *         self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 *     else:
 */
goto __pyx_L10;
}
/* "View.MemoryView":362
 *         self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 *     else:
 *         self.dtype_is_object = dtype_is_object             # <<<<<<<<<<<<<<
 *
 *     self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 */
/*else*/ {
/* no format requested: trust the caller-supplied flag */
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":364
 *         self.dtype_is_object = dtype_is_object
 *
 *     self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(             # <<<<<<<<<<<<<<
 *         <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 *     self.typeinfo = NULL
 */
/* align the acquisition-count pointer so atomic ops on it are safe */
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":366
 *     self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 *         <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 *     self.typeinfo = NULL             # <<<<<<<<<<<<<<
 *
 *     def __dealloc__(memoryview self):
 */
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":341
 *     cdef __Pyx_TypeInfo *typeinfo
 *
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
 *         self.obj = obj
 *         self.flags = flags
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":368
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
/* tp_dealloc-level wrapper for memoryview.__dealloc__: casts `self` to
 * the concrete struct and delegates to the implementation.  Returns
 * nothing; dealloc cannot propagate exceptions. */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of memoryview.__dealloc__ ("View.MemoryView":368-383).
 * Releases the exported buffer (when one was acquired) and disposes of
 * self.lock: if the lock came from the global preallocated pool it is
 * returned to the pool by swapping it with the last in-use slot;
 * otherwise it was heap-allocated and is freed with PyThread_free_lock. */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
PyThread_type_lock __pyx_t_5;
PyThread_type_lock __pyx_t_6;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":369
 *
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:             # <<<<<<<<<<<<<<
 *             __Pyx_ReleaseBuffer(&self.view)
 *
 */
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":370
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:
 *             __Pyx_ReleaseBuffer(&self.view)             # <<<<<<<<<<<<<<
 *
 *         cdef int i
 */
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":369
 *
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:             # <<<<<<<<<<<<<<
 *             __Pyx_ReleaseBuffer(&self.view)
 *
 */
}
/* "View.MemoryView":374
 *         cdef int i
 *         global __pyx_memoryview_thread_locks_used
 *         if self.lock != NULL:             # <<<<<<<<<<<<<<
 *             for i in range(__pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 */
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":375
 *         global __pyx_memoryview_thread_locks_used
 *         if self.lock != NULL:
 *             for i in range(__pyx_memoryview_thread_locks_used):             # <<<<<<<<<<<<<<
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1
 */
/* scan only the in-use prefix of the pool for this lock */
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":376
 *         if self.lock != NULL:
 *             for i in range(__pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:
 */
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
 *             for i in range(__pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1             # <<<<<<<<<<<<<<
 *                     if i != __pyx_memoryview_thread_locks_used:
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 */
/* shrink the in-use prefix; the lock stays in the array for reuse */
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":378
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 */
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":380
 *                     if i != __pyx_memoryview_thread_locks_used:
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])             # <<<<<<<<<<<<<<
 *                         break
 *                     else:
 */
/* swap-with-last so the in-use prefix stays contiguous */
__pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":379
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (             # <<<<<<<<<<<<<<
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 *                         break
 */
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6;
/* "View.MemoryView":378
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 */
}
/* "View.MemoryView":381
 *                         __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 *                             __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 *                         break             # <<<<<<<<<<<<<<
 *                     else:
 *                         PyThread_free_lock(self.lock)
 */
goto __pyx_L6_break;
/* "View.MemoryView":376
 *         if self.lock != NULL:
 *             for i in range(__pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
 *                     __pyx_memoryview_thread_locks_used -= 1
 *                     if i != __pyx_memoryview_thread_locks_used:
 */
}
}
/*else*/ {
/* "View.MemoryView":383
 *                         break
 *                     else:
 *                         PyThread_free_lock(self.lock)             # <<<<<<<<<<<<<<
 *
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
 */
/* for-else: loop completed without break, so the lock is not from the
 * pool -- it was heap-allocated in __cinit__ and must be freed here */
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":374
 *         cdef int i
 *         global __pyx_memoryview_thread_locks_used
 *         if self.lock != NULL:             # <<<<<<<<<<<<<<
 *             for i in range(__pyx_memoryview_thread_locks_used):
 *                 if __pyx_memoryview_thread_locks[i] is self.lock:
 */
}
/* "View.MemoryView":368
 *         self.typeinfo = NULL
 *
 *     def __dealloc__(memoryview self):             # <<<<<<<<<<<<<<
 *         if self.obj is not None:
 *             __Pyx_ReleaseBuffer(&self.view)
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":385
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* Implementation of memoryview.get_item_pointer ("View.MemoryView":385).
 * Walks the element indices in `index` (list/tuple fast path, generic
 * iterator otherwise), advancing `itemp` through the buffer one dimension
 * at a time via __pyx_pybuffer_index.  Returns the element pointer, or
 * NULL with an exception set on failure. */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":387
 * cdef char *get_item_pointer(memoryview self, object index) except NULL:
 *     cdef Py_ssize_t dim
 *     cdef char *itemp = <char *> self.view.buf             # <<<<<<<<<<<<<<
 *
 *     for dim, idx in enumerate(index):
 */
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":389
 *     cdef char *itemp = <char *> self.view.buf
 *
 *     for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
/* __pyx_t_1 is the enumerate counter; list/tuple inputs are indexed
 * directly, anything else goes through tp_iternext */
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 389, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 389, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
/* iterator exhausted or failed: StopIteration means a clean end,
 * anything else propagates */
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 389, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":390
 *
 *     for dim, idx in enumerate(index):
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)             # <<<<<<<<<<<<<<
 *
 *     return itemp
 */
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 390, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 390, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":389
 *     cdef char *itemp = <char *> self.view.buf
 *
 *     for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":392
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 *     return itemp             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":385
 *     PyThread_free_lock(self.lock)
 *
 * cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t dim
 *     cdef char *itemp = <char *> self.view.buf
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":395
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
/* mp_subscript-level wrapper for memoryview.__getitem__: casts `self`
 * and delegates to the implementation, which returns a new reference
 * (or NULL with an exception set). */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__getitem__ ("View.MemoryView":395-406).
 * `...` returns self; otherwise _unellipsify normalises the index into a
 * (have_slices, indices) pair.  Slice indices produce a sliced memoryview
 * via memview_slice; pure element indices resolve to a raw pointer via
 * get_item_pointer, converted back to a Python object.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":396
 *
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 *
 */
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":397
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:
 *             return self             # <<<<<<<<<<<<<<
 *
 *         have_slices, indices = _unellipsify(index, self.view.ndim)
 */
/* mv[...] is the identity: return self with a fresh reference */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":396
 *
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 *
 */
}
/* "View.MemoryView":399
 *             return self
 *
 *         have_slices, indices = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
 *
 *         cdef char *itemp
 */
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 399, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* unpack the 2-tuple result; exactly two items are required */
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 399, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 399, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 399, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 399, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":402
 *
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 402, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":403
 *         cdef char *itemp
 *         if have_slices:
 *             return memview_slice(self, indices)             # <<<<<<<<<<<<<<
 *         else:
 *             itemp = self.get_item_pointer(indices)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":402
 *
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
}
/* "View.MemoryView":405
 *             return memview_slice(self, indices)
 *         else:
 *             itemp = self.get_item_pointer(indices)             # <<<<<<<<<<<<<<
 *             return self.convert_item_to_object(itemp)
 *
 */
/*else*/ {
/* element access: vtable dispatch so subclasses can override */
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 405, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":406
 *         else:
 *             itemp = self.get_item_pointer(indices)
 *             return self.convert_item_to_object(itemp)             # <<<<<<<<<<<<<<
 *
 *     def __setitem__(memoryview self, object index, object value):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 406, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":395
 *
 *
 *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
 *         if index is Ellipsis:
 *             return self
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":408
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* have_slices, index = _unellipsify(index, self.view.ndim)
*
*/
/* Python wrapper */
/* mp_ass_subscript-level wrapper for memoryview.__setitem__: casts
 * `self` and delegates to the implementation.  Returns 0 on success,
 * -1 with an exception set on failure. */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__setitem__(index, value).
 * First normalizes `index` via _unellipsify() (which returns a 2-tuple
 * (have_slices, index)); then:
 *   - if any slices are present and `value` coerces to a memoryview,
 *     performs slice-to-slice assignment (setitem_slice_assignment);
 *   - if slices are present but `value` is a scalar, broadcasts it
 *     (setitem_slice_assign_scalar);
 *   - otherwise assigns a single element (setitem_indexed).
 * Returns 0 on success, -1 with a Python exception set on error. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":409
 *
 * def __setitem__(memoryview self, object index, object value):
 * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * if have_slices:
 */
__pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
/* Unpack the exactly-two-element tuple returned by _unellipsify into
 * (have_slices, index); raises if it is None or the wrong size. */
if (likely(__pyx_t_1 != Py_None)) {
PyObject* sequence = __pyx_t_1;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 409, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 409, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":411
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 * if have_slices: # <<<<<<<<<<<<<<
 * obj = self.is_slice(value)
 * if obj:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 411, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":412
 *
 * if have_slices:
 * obj = self.is_slice(value) # <<<<<<<<<<<<<<
 * if obj:
 * self.setitem_slice_assignment(self[index], obj)
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 412, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_obj = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":413
 * if have_slices:
 * obj = self.is_slice(value)
 * if obj: # <<<<<<<<<<<<<<
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":414
 * obj = self.is_slice(value)
 * if obj:
 * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_slice_assign_scalar(self[index], value)
 */
__pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":413
 * if have_slices:
 * obj = self.is_slice(value)
 * if obj: # <<<<<<<<<<<<<<
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 */
goto __pyx_L4;
}
/* "View.MemoryView":416
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_indexed(index, value)
 */
/*else*/ {
__pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 416, __pyx_L1_error)
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L4:;
/* "View.MemoryView":411
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 * if have_slices: # <<<<<<<<<<<<<<
 * obj = self.is_slice(value)
 * if obj:
 */
goto __pyx_L3;
}
/* "View.MemoryView":418
 * self.setitem_slice_assign_scalar(self[index], value)
 * else:
 * self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
 *
 * cdef is_slice(self, obj):
 */
/*else*/ {
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L3:;
/* "View.MemoryView":408
 * return self.convert_item_to_object(itemp)
 *
 * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":420
 * self.setitem_indexed(index, value)
 *
 * cdef is_slice(self, obj): # <<<<<<<<<<<<<<
 * if not isinstance(obj, memoryview):
 * try:
 */
/* memoryview.is_slice(obj): if `obj` is not already a memoryview, try
 * to coerce it via memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object).  A TypeError from that constructor is caught
 * and turned into a plain None return (callers treat None as "not a
 * slice"); any other exception propagates.  On the error path __pyx_r
 * is 0 (NULL) with a Python exception set. */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":421
 *
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":422
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
/* try-block: save the current exception state so it can be restored
 * whether the constructor succeeds, raises TypeError, or raises
 * something else. */
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":423
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
__pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 423, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":424
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object) # <<<<<<<<<<<<<<
 * except TypeError:
 * return None
 */
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 424, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":423
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
/* Build the 3-tuple (obj, flags, dtype_is_object) and call the
 * memoryview type object with it. */
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 423, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 423, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":422
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L11_try_end;
__pyx_L4_error:;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":425
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 * except TypeError: # <<<<<<<<<<<<<<
 * return None
 *
 */
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 425, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":426
 * self.dtype_is_object)
 * except TypeError:
 * return None # <<<<<<<<<<<<<<
 *
 * return obj
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":422
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
/* Non-TypeError exception: restore the saved exception state and
 * propagate through the normal error path. */
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L11_try_end:;
}
/* "View.MemoryView":421
 *
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 */
}
/* "View.MemoryView":428
 * return None
 *
 * return obj # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assignment(self, dst, src):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":420
 * self.setitem_indexed(index, value)
 *
 * cdef is_slice(self, obj): # <<<<<<<<<<<<<<
 * if not isinstance(obj, memoryview):
 * try:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":430
 * return obj
 *
 * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice dst_slice
 * cdef __Pyx_memviewslice src_slice
 */
/* memoryview.setitem_slice_assignment(dst, src): copy the contents of
 * the `src` memoryview into the `dst` memoryview by extracting a
 * __Pyx_memviewslice from each and delegating to
 * memoryview_copy_contents (which handles broadcasting of ndim).
 * Both arguments must be memoryview instances (type-checked below).
 * Returns None on success, NULL with an exception on error. */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":434
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 434, __pyx_L1_error)
/* "View.MemoryView":435
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 */
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 435, __pyx_L1_error)
/* "View.MemoryView":436
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 */
/* Fetch src.ndim and dst.ndim as C ints via attribute lookup. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":434
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 434, __pyx_L1_error)
/* "View.MemoryView":430
 * return obj
 *
 * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice dst_slice
 * cdef __Pyx_memviewslice src_slice
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":438
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
 * cdef int array[128]
 * cdef void *tmp = NULL
 */
/* memoryview.setitem_slice_assign_scalar(dst, value): broadcast the
 * single scalar `value` over every element of the `dst` memoryview.
 * The scalar is first packed into a temporary item buffer: a 128-int
 * stack array when self.view.itemsize fits, otherwise a PyMem_Malloc
 * heap buffer.  A try/finally construct (hand-expanded below) ensures
 * PyMem_Free(tmp) runs on both the success and the exception path.
 * Returns None on success, NULL with an exception on error. */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
char const *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":440
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 * cdef int array[128]
 * cdef void *tmp = NULL # <<<<<<<<<<<<<<
 * cdef void *item
 *
 */
__pyx_v_tmp = NULL;
/* "View.MemoryView":445
 * cdef __Pyx_memviewslice *dst_slice
 * cdef __Pyx_memviewslice tmp_slice
 * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 */
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":447
 * dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 */
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":448
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
 * if tmp == NULL:
 * raise MemoryError
 */
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":449
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL: # <<<<<<<<<<<<<<
 * raise MemoryError
 * item = tmp
 */
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":450
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 * raise MemoryError # <<<<<<<<<<<<<<
 * item = tmp
 * else:
 */
PyErr_NoMemory(); __PYX_ERR(1, 450, __pyx_L1_error)
/* "View.MemoryView":449
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL: # <<<<<<<<<<<<<<
 * raise MemoryError
 * item = tmp
 */
}
/* "View.MemoryView":451
 * if tmp == NULL:
 * raise MemoryError
 * item = tmp # <<<<<<<<<<<<<<
 * else:
 * item = <void *> array
 */
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":447
 * dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 */
goto __pyx_L3;
}
/* "View.MemoryView":453
 * item = tmp
 * else:
 * item = <void *> array # <<<<<<<<<<<<<<
 *
 * try:
 */
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":455
 * item = <void *> array
 *
 * try: # <<<<<<<<<<<<<<
 * if self.dtype_is_object:
 * (<PyObject **> item)[0] = <PyObject *> value
 */
/*try:*/ {
/* "View.MemoryView":456
 *
 * try:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 */
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":457
 * try:
 * if self.dtype_is_object:
 * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
 * else:
 * self.assign_item_from_object(<char *> item, value)
 */
/* Object dtype: store the borrowed PyObject* directly in the item
 * buffer (no pack/unpack needed). */
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":456
 *
 * try:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 */
goto __pyx_L8;
}
/* "View.MemoryView":459
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
 *
 *
 */
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 459, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L8:;
/* "View.MemoryView":463
 *
 *
 * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":464
 *
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 * item, self.dtype_is_object)
 */
__pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 464, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":463
 *
 *
 * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
}
/* "View.MemoryView":465
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
 * item, self.dtype_is_object)
 * finally:
 */
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":468
 * item, self.dtype_is_object)
 * finally:
 * PyMem_Free(tmp) # <<<<<<<<<<<<<<
 *
 * cdef setitem_indexed(self, index, value):
 */
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
/*exception exit:*/{
__Pyx_PyThreadState_declare
__pyx_L6_error:;
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
/* Stash the in-flight exception (and, on Py3, the prior exception
 * state) so the finally-body can run PyMem_Free safely, then
 * re-raise afterwards. */
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
__Pyx_PyThreadState_assign
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
}
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":438
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
 * cdef int array[128]
 * cdef void *tmp = NULL
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":470
 * PyMem_Free(tmp)
 *
 * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value)
 */
/* memoryview.setitem_indexed(index, value): single-element assignment.
 * Resolves `index` to a raw item pointer via get_item_pointer(), then
 * writes `value` there via assign_item_from_object().  Returns None on
 * success, NULL with an exception on error. */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":471
 *
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
 * self.assign_item_from_object(itemp, value)
 *
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) __PYX_ERR(1, 471, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":472
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
 *
 * cdef convert_item_to_object(self, char *itemp):
 */
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 472, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":470
 * PyMem_Free(tmp)
 *
 * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value)
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":474
 * self.assign_item_from_object(itemp, value)
 *
 * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* memoryview.convert_item_to_object(itemp): generic fallback that
 * converts a raw item at `itemp` to a Python object by copying
 * self.view.itemsize bytes into a bytes object and calling
 * struct.unpack(self.view.format, bytesitem).  struct.error is caught
 * and re-raised as ValueError("Unable to convert item to object").
 * When the format string is a single character, the lone unpacked
 * value is returned instead of the 1-tuple. */
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":477
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef bytes bytesitem
 *
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 477, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":480
 * cdef bytes bytesitem
 *
 * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
 * try:
 * result = struct.unpack(self.view.format, bytesitem)
 */
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 480, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":481
 *
 * bytesitem = itemp[:self.view.itemsize]
 * try: # <<<<<<<<<<<<<<
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 */
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":482
 * bytesitem = itemp[:self.view.itemsize]
 * try:
 * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
 * except struct.error:
 * raise ValueError("Unable to convert item to object")
 */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
/* Call struct.unpack(format, bytesitem) via the fastest available
 * calling convention (bound-method unwrap, fastcall, or a plain
 * tuple call as the generic fallback). */
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":481
 *
 * bytesitem = itemp[:self.view.itemsize]
 * try: # <<<<<<<<<<<<<<
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 */
}
/* "View.MemoryView":486
 * raise ValueError("Unable to convert item to object")
 * else:
 * if len(self.view.format) == 1: # <<<<<<<<<<<<<<
 * return result[0]
 * return result
 */
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":487
 * else:
 * if len(self.view.format) == 1:
 * return result[0] # <<<<<<<<<<<<<<
 * return result
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":486
 * raise ValueError("Unable to convert item to object")
 * else:
 * if len(self.view.format) == 1: # <<<<<<<<<<<<<<
 * return result[0]
 * return result
 */
}
/* "View.MemoryView":488
 * if len(self.view.format) == 1:
 * return result[0]
 * return result # <<<<<<<<<<<<<<
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":483
 * try:
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error: # <<<<<<<<<<<<<<
 * raise ValueError("Unable to convert item to object")
 * else:
 */
/* struct.error is looked up dynamically on the imported module, so
 * the comparison must happen at runtime rather than via a cached
 * builtin. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 483, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 483, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_9);
/* "View.MemoryView":484
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
 * else:
 * if len(self.view.format) == 1:
 */
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 484, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":481
 *
 * bytesitem = itemp[:self.view.itemsize]
 * try: # <<<<<<<<<<<<<<
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 */
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":474
 * self.assign_item_from_object(itemp, value)
 *
 * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":490
 * return result
 *
 * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* NOTE(review): Cython-GENERATED code -- comments only were added; do not
 * hand-edit the logic, it must stay in sync with the generator.
 * Writes one memoryview element: packs the Python `value` with
 * struct.pack(self.view.format, ...) and copies the packed bytes into the
 * raw element storage pointed to by `itemp`.
 * Returns None on success, or 0 (NULL) with an exception set on failure. */
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":493
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef char c
 * cdef bytes bytesvalue
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":498
 * cdef Py_ssize_t i
 *
 * if isinstance(value, tuple): # <<<<<<<<<<<<<<
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 */
/* A tuple supplies one value per struct field, so it is star-unpacked
 * into struct.pack; any other object is passed as a single argument. */
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":499
 *
 * if isinstance(value, tuple):
 * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
 * else:
 * bytesvalue = struct.pack(self.view.format, value)
 */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
/* Build the argument tuple (format,) + tuple(value) for pack(*args). */
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 499, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":498
 * cdef Py_ssize_t i
 *
 * if isinstance(value, tuple): # <<<<<<<<<<<<<<
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 */
goto __pyx_L3;
}
/* "View.MemoryView":501
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
 *
 * for i, c in enumerate(bytesvalue):
 */
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
/* Generated fast-call dispatch for struct.pack(format, value):
 * __pyx_t_7 becomes 1 when a bound method's self is hoisted into the
 * positional arguments. */
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
/* Slow path: build an argument tuple and use the generic call. */
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":503
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
/* enumerate(bytesvalue) is unrolled to a raw char* scan over the bytes
 * buffer; __pyx_t_9 carries the enumerate counter `i`. */
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 503, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":504
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":503
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":504
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":490
 * return result
 *
 * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":507
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 * if flags & PyBUF_STRIDES:
 * info.shape = self.view.shape
 */
/* Python wrapper */
/* NOTE(review): generated thin wrapper for __getbuffer__ -- casts the
 * untyped PyObject* self to the memoryview struct and forwards to the
 * typed implementation below. Comments only; do not hand-edit. */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED implementation of memoryview.__getbuffer__
 * (buffer protocol). Fills `info` from self.view: shape/strides/suboffsets/
 * format are exported only when the corresponding PyBUF_* flag is set
 * (NULL otherwise); buf/ndim/itemsize/len are always copied; readonly is
 * hard-coded to 0 and info->obj is set to self (owning reference).
 * Comments only were added; do not hand-edit the logic. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
char *__pyx_t_3;
void *__pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so error/exit paths can tell whether the
 * final "info.obj = self" assignment was reached. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":508
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.shape = self.view.shape
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":509
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_STRIDES:
 * info.shape = self.view.shape # <<<<<<<<<<<<<<
 * else:
 * info.shape = NULL
 */
__pyx_t_2 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_2;
/* "View.MemoryView":508
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.shape = self.view.shape
 * else:
 */
goto __pyx_L3;
}
/* "View.MemoryView":511
 * info.shape = self.view.shape
 * else:
 * info.shape = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_STRIDES:
 */
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L3:;
/* "View.MemoryView":513
 * info.shape = NULL
 *
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.strides = self.view.strides
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":514
 *
 * if flags & PyBUF_STRIDES:
 * info.strides = self.view.strides # <<<<<<<<<<<<<<
 * else:
 * info.strides = NULL
 */
__pyx_t_2 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_2;
/* "View.MemoryView":513
 * info.shape = NULL
 *
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.strides = self.view.strides
 * else:
 */
goto __pyx_L4;
}
/* "View.MemoryView":516
 * info.strides = self.view.strides
 * else:
 * info.strides = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_INDIRECT:
 */
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L4:;
/* "View.MemoryView":518
 * info.strides = NULL
 *
 * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
 * info.suboffsets = self.view.suboffsets
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":519
 *
 * if flags & PyBUF_INDIRECT:
 * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
 * else:
 * info.suboffsets = NULL
 */
__pyx_t_2 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_2;
/* "View.MemoryView":518
 * info.strides = NULL
 *
 * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
 * info.suboffsets = self.view.suboffsets
 * else:
 */
goto __pyx_L5;
}
/* "View.MemoryView":521
 * info.suboffsets = self.view.suboffsets
 * else:
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_FORMAT:
 */
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L5:;
/* "View.MemoryView":523
 * info.suboffsets = NULL
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.view.format
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":524
 *
 * if flags & PyBUF_FORMAT:
 * info.format = self.view.format # <<<<<<<<<<<<<<
 * else:
 * info.format = NULL
 */
__pyx_t_3 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_3;
/* "View.MemoryView":523
 * info.suboffsets = NULL
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.view.format
 * else:
 */
goto __pyx_L6;
}
/* "View.MemoryView":526
 * info.format = self.view.format
 * else:
 * info.format = NULL # <<<<<<<<<<<<<<
 *
 * info.buf = self.view.buf
 */
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L6:;
/* "View.MemoryView":528
 * info.format = NULL
 *
 * info.buf = self.view.buf # <<<<<<<<<<<<<<
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize
 */
__pyx_t_4 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":529
 *
 * info.buf = self.view.buf
 * info.ndim = self.view.ndim # <<<<<<<<<<<<<<
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len
 */
__pyx_t_5 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_5;
/* "View.MemoryView":530
 * info.buf = self.view.buf
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
 * info.len = self.view.len
 * info.readonly = 0
 */
__pyx_t_6 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_6;
/* "View.MemoryView":531
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len # <<<<<<<<<<<<<<
 * info.readonly = 0
 * info.obj = self
 */
__pyx_t_6 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_6;
/* "View.MemoryView":532
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len
 * info.readonly = 0 # <<<<<<<<<<<<<<
 * info.obj = self
 *
 */
__pyx_v_info->readonly = 0;
/* "View.MemoryView":533
 * info.len = self.view.len
 * info.readonly = 0
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
/* Replace the placeholder None in info->obj with an owned reference to
 * self; the consumer releases it via PyBuffer_Release. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":507
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 * if flags & PyBUF_STRIDES:
 * info.shape = self.view.shape
 */
/* function exit code */
__pyx_r = 0;
/* Drop the placeholder None reference if it was never replaced. */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":539
 *
 * @property
 * def T(self): # <<<<<<<<<<<<<<
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice)
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `T` property -- forwards
 * to the typed implementation below. Comments only; do not hand-edit. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED getter for memoryview.T -- copies the
 * memoryview (memoryview_copy) then transposes the copy's slice in place
 * (transpose_memslice) and returns it. Comments only; do not hand-edit. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":540
 * @property
 * def T(self):
 * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
 * transpose_memslice(&result.from_slice)
 * return result
 */
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 540, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 540, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":541
 * def T(self):
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
 * return result
 *
 */
/* transpose_memslice signals failure by returning 0. */
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 541, __pyx_L1_error)
/* "View.MemoryView":542
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice)
 * return result # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":539
 *
 * @property
 * def T(self): # <<<<<<<<<<<<<<
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":545
 *
 * @property
 * def base(self): # <<<<<<<<<<<<<<
 * return self.obj
 *
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `base` property. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED getter for memoryview.base -- returns a
 * new reference to self->obj (the object the view was created from).
 * Comments only; do not hand-edit. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":546
 * @property
 * def base(self):
 * return self.obj # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":545
 *
 * @property
 * def base(self): # <<<<<<<<<<<<<<
 * return self.obj
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":549
 *
 * @property
 * def shape(self): # <<<<<<<<<<<<<<
 * return tuple([length for length in self.view.shape[:self.view.ndim]])
 *
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `shape` property. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED getter for memoryview.shape -- builds a
 * Python tuple of the first view.ndim entries of the view.shape array.
 * The list comprehension is unrolled to a raw Py_ssize_t* scan.
 * Comments only; do not hand-edit. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":550
 * @property
 * def shape(self):
 * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":549
 *
 * @property
 * def shape(self): # <<<<<<<<<<<<<<
 * return tuple([length for length in self.view.shape[:self.view.ndim]])
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":553
 *
 * @property
 * def strides(self): # <<<<<<<<<<<<<<
 * if self.view.strides == NULL:
 *
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `strides` property. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED getter for memoryview.strides -- raises
 * ValueError when the underlying buffer does not expose strides, else
 * returns a tuple of the first view.ndim stride values.
 * Comments only; do not hand-edit. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
 * @property
 * def strides(self):
 * if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 * raise ValueError("Buffer view does not expose strides")
 */
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":556
 * if self.view.strides == NULL:
 *
 * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
 *
 * return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 */
/* __pyx_tuple__8 is the pre-built argument tuple holding the message. */
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 556, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 556, __pyx_L1_error)
/* "View.MemoryView":554
 * @property
 * def strides(self):
 * if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 * raise ValueError("Buffer view does not expose strides")
 */
}
/* "View.MemoryView":558
 * raise ValueError("Buffer view does not expose strides")
 *
 * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":553
 *
 * @property
 * def strides(self): # <<<<<<<<<<<<<<
 * if self.view.strides == NULL:
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":561
 *
 * @property
 * def suboffsets(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `suboffsets` property. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED getter for memoryview.suboffsets --
 * returns (-1,) * ndim when the buffer exposes no suboffsets, else a
 * tuple of the first view.ndim suboffset values.
 * Comments only; do not hand-edit. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":562
 * @property
 * def suboffsets(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return (-1,) * self.view.ndim
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":563
 * def suboffsets(self):
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
__Pyx_XDECREF(__pyx_r);
/* __pyx_tuple__9 is the pre-built constant (-1,); tuple * int repeats it. */
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 563, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__9, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 563, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":562
 * @property
 * def suboffsets(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return (-1,) * self.view.ndim
 *
 */
}
/* "View.MemoryView":565
 * return (-1,) * self.view.ndim
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":561
 *
 * @property
 * def suboffsets(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":568
 *
 * @property
 * def ndim(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `ndim` property. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED getter for memoryview.ndim -- boxes
 * view.ndim (a C int) into a Python integer. Comments only; do not
 * hand-edit. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":569
 * @property
 * def ndim(self):
 * return self.view.ndim # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":568
 *
 * @property
 * def ndim(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":572
 *
 * @property
 * def itemsize(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `itemsize` property. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED getter for memoryview.itemsize -- boxes
 * view.itemsize (a Py_ssize_t) into a Python integer. Comments only;
 * do not hand-edit. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":573
 * @property
 * def itemsize(self):
 * return self.view.itemsize # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 573, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":572
 *
 * @property
 * def itemsize(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":576
 *
 * @property
 * def nbytes(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
/* Python wrapper */
/* NOTE(review): generated getter wrapper for the `nbytes` property. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of `memoryview.nbytes.__get__`: computes
 * `self.size * self.view.itemsize` at the Python-object level (the `size`
 * attribute is fetched via getattr so the cached `size` property is reused).
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":577
 * @property
 * def nbytes(self):
 * return self.size * self.view.itemsize # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
 *
 * @property
 * def nbytes(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":580
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
/* Python-level wrapper for the `memoryview.size` property getter:
 * casts self to the concrete memoryview struct and delegates. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of `memoryview.size.__get__`: total number of elements.
 * On first access (self->_size is None) it multiplies the Py_ssize_t entries
 * of view.shape[0..ndim-1] together as Python ints and caches the product in
 * self->_size; subsequent calls return the cached object. Returns a new
 * reference, or NULL with an exception set on failure. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":581
 * @property
 * def size(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":582
 * def size(self):
 * if self._size is None:
 * result = 1 # <<<<<<<<<<<<<<
 *
 * for length in self.view.shape[:self.view.ndim]:
 */
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":584
 * result = 1
 *
 * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
 * result *= length
 *
 */
/* Pointer walk over the C shape array; each extent is boxed per iteration. */
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":585
 *
 * for length in self.view.shape[:self.view.ndim]:
 * result *= length # <<<<<<<<<<<<<<
 *
 * self._size = result
 */
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 585, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":587
 * result *= length
 *
 * self._size = result # <<<<<<<<<<<<<<
 *
 * return self._size
 */
/* Cache the product; the old _size reference (None) is released here. */
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":581
 * @property
 * def size(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
}
/* "View.MemoryView":589
 * self._size = result
 *
 * return self._size # <<<<<<<<<<<<<<
 *
 * def __len__(self):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":580
 *
 * @property
 * def size(self): # <<<<<<<<<<<<<<
 * if self._size is None:
 * result = 1
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":591
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
/* sq_length / mp_length slot wrapper for memoryview.__len__:
 * casts self to the concrete struct and delegates. */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__len__: length of the first dimension
 * (view.shape[0]) when ndim >= 1, otherwise 0. Cannot fail. */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":592
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":593
 * def __len__(self):
 * if self.view.ndim >= 1:
 * return self.view.shape[0] # <<<<<<<<<<<<<<
 *
 * return 0
 */
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":592
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
}
/* "View.MemoryView":595
 * return self.view.shape[0]
 *
 * return 0 # <<<<<<<<<<<<<<
 *
 * def __repr__(self):
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":591
 * return self._size
 *
 * def __len__(self): # <<<<<<<<<<<<<<
 * if self.view.ndim >= 1:
 * return self.view.shape[0]
 */
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":597
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
/* tp_repr slot wrapper for memoryview.__repr__:
 * casts self to the concrete struct and delegates. */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__repr__: formats
 * "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, id(self)).
 * Fetches base.__class__.__name__ via getattr, calls builtin id(self), packs
 * both into a 2-tuple and applies %-formatting. Returns a new reference,
 * or NULL with an exception set on failure. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":598
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":599
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self)) # <<<<<<<<<<<<<<
 *
 * def __str__(self):
 */
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":598
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":597
 * return 0
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self))
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":601
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
/* tp_str slot wrapper for memoryview.__str__:
 * casts self to the concrete struct and delegates. */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__str__: formats
 * "<MemoryView of %r object>" % (self.base.__class__.__name__,).
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":602
 *
 * def __str__(self):
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":601
 * id(self))
 *
 * def __str__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":605
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Python-level wrapper for memoryview.is_c_contig():
 * casts self to the concrete struct and delegates (no arguments used). */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.is_c_contig(): obtains a __Pyx_memviewslice
 * for this view (via get_slice_from_memview, possibly filling the local tmp)
 * and returns the Python bool of slice_is_contig(mslice[0], 'C', ndim),
 * i.e. whether the data is C-contiguous (row-major). */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":608
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice[0], 'C', self.view.ndim)
 *
 */
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":609
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def is_f_contig(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 609, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":605
 *
 *
 * def is_c_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Python-level wrapper for memoryview.is_f_contig():
 * casts self to the concrete struct and delegates (no arguments used). */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.is_f_contig(): same as is_c_contig above but
 * tests with order 'F', i.e. whether the data is Fortran-contiguous
 * (column-major). Returns a new Python bool reference or NULL on error. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":614
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice[0], 'F', self.view.ndim)
 *
 */
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":615
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def copy(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 615, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
 * return slice_is_contig(mslice[0], 'C', self.view.ndim)
 *
 * def is_f_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":617
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
/* Python-level wrapper for memoryview.copy():
 * casts self to the concrete struct and delegates (no arguments used). */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.copy(): makes a C-contiguous copy of the view.
 * Clears PyBUF_F_CONTIGUOUS from the buffer flags, snapshots the current
 * slice, allocates a new C-order ('c') contiguous buffer via
 * copy_new_contig, and wraps the result slice in a new memoryview object.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":619
 * def copy(self):
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &mslice)
 */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":621
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 *
 * slice_copy(self, &mslice) # <<<<<<<<<<<<<<
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
 * self.view.itemsize,
 */
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":622
 *
 * slice_copy(self, &mslice)
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_C_CONTIGUOUS,
 */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":627
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
 *
 * def copy_fortran(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 627, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":617
 * return slice_is_contig(mslice[0], 'F', self.view.ndim)
 *
 * def copy(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":629
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
/* Python-level wrapper for memoryview.copy_fortran():
 * casts self to the concrete struct and delegates (no arguments used). */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.copy_fortran(): Fortran-order counterpart of
 * copy(). Clears PyBUF_C_CONTIGUOUS from the flags, snapshots the current
 * slice into `src`, copies it into a new F-order ("fortran") contiguous
 * buffer `dst` via copy_new_contig, and wraps `dst` in a new memoryview.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":631
 * def copy_fortran(self):
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &src)
 */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":633
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 *
 * slice_copy(self, &src) # <<<<<<<<<<<<<<
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim,
 * self.view.itemsize,
 */
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":634
 *
 * slice_copy(self, &src)
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_F_CONTIGUOUS,
 */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 634, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":639
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 639, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":629
 * return memoryview_copy_from_slice(self, &mslice)
 *
 * def copy_fortran(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* C-level constructor (cname __pyx_memoryview_new): builds the argument
 * tuple (o, flags, bool(dtype_is_object)), calls the memoryview type to
 * create an instance, then stores the C typeinfo pointer directly on the
 * result. Returns a new reference to the memoryview, or 0 with an
 * exception set on failure. */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":644
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
 * result.typeinfo = typeinfo
 * return result
 */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":645
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo # <<<<<<<<<<<<<<
 * return result
 *
 */
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":646
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 * return result # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_check')
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":643
 *
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":649
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* Inline type check (cname __pyx_memoryview_check): returns nonzero iff `o`
 * is an instance of the Cython memoryview type (isinstance(o, memoryview)).
 * Cannot fail. */
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":650
 * @cname('__pyx_memoryview_check')
 * cdef inline bint memoryview_check(object o):
 * return isinstance(o, memoryview) # <<<<<<<<<<<<<<
 *
 * cdef tuple _unellipsify(object index, int ndim):
 */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":649
 *
 * @cname('__pyx_memoryview_check')
 * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
 * return isinstance(o, memoryview)
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":652
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":657
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":658
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":657
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":660
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":662
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 662, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":663
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":664
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":665
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 665, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 665, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":666
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":667
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":668
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 668, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 668, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__10);
__Pyx_GIVEREF(__pyx_slice__10);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__10);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 668, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":669
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":667
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":671
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__11); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 671, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":672
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":666
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":674
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":675
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7);
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_Raise(__pyx_t_7, 0, 0, 0);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__PYX_ERR(1, 675, __pyx_L1_error)
/* "View.MemoryView":674
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":677
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":678
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 678, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":665
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":680
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(1, 680, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":681
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__12);
__Pyx_GIVEREF(__pyx_slice__12);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__12);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":681
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":684
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 684, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_7);
__pyx_t_7 = 0;
goto __pyx_L0;
/* "View.MemoryView":652
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":686
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* assert_direct_dimensions: verify that none of the first `ndim` entries of
 * `suboffsets` is non-negative, i.e. that the memoryview has no indirect
 * (pointer-chasing) dimensions.  Raises ValueError and returns NULL if an
 * indirect dimension is found; otherwise returns a new reference to None. */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_5 = NULL;
  int __pyx_i;
  __Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
  /* Scan suboffsets[0:ndim]; a suboffset >= 0 marks an indirect dimension. */
  for (__pyx_i = 0; __pyx_i < __pyx_v_ndim; __pyx_i++) {
    if (unlikely(__pyx_v_suboffsets[__pyx_i] >= 0)) {
      /* raise ValueError("Indirect dimensions not supported") -- the message
       * tuple __pyx_tuple__13 is the module-level constant for this error. */
      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 689, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_Raise(__pyx_t_5, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
      __PYX_ERR(1, 689, __pyx_L1_error)
    }
  }
  /* Success: return None (new reference, per CPython calling convention). */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":696
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* memview_slice (cname __pyx_memview_slice): core slicing routine for Cython
 * memoryviews.  Walks the `indices` sequence (integers collapse a dimension,
 * None inserts a length-1 axis, slice objects keep a -- possibly strided --
 * axis) and builds a new __Pyx_memviewslice `dst` from `memview`'s slice via
 * __pyx_memoryview_slice_memviewslice, then wraps `dst` in a new memoryview
 * object.  Returns NULL with an exception set on error.
 * NOTE(review): callers are expected to have run _unellipsify first so that
 * `indices` contains no Ellipsis -- TODO confirm against call sites. */
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":697
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices):
 * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
 * cdef bint negative_step
 * cdef __Pyx_memviewslice src, dst
 */
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":704
 *
 *
 * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
 *
 * cdef _memoryviewslice memviewsliceobj
 */
memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)));
/* "View.MemoryView":708
 * cdef _memoryviewslice memviewsliceobj
 *
 * assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
 *
 * if isinstance(memview, _memoryviewslice):
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 708, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":710
 * assert memview.view.ndim > 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice
 */
/* If memview is already a _memoryviewslice, reuse its embedded slice struct
 * directly; otherwise copy the view info into the local `src`. */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":711
 *
 * if isinstance(memview, _memoryviewslice):
 * memviewsliceobj = memview # <<<<<<<<<<<<<<
 * p_src = &memviewsliceobj.from_slice
 * else:
 */
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 711, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":712
 * if isinstance(memview, _memoryviewslice):
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, &src)
 */
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":710
 * assert memview.view.ndim > 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice
 */
goto __pyx_L3;
}
/* "View.MemoryView":714
 * p_src = &memviewsliceobj.from_slice
 * else:
 * slice_copy(memview, &src) # <<<<<<<<<<<<<<
 * p_src = &src
 *
 */
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":715
 * else:
 * slice_copy(memview, &src)
 * p_src = &src # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":721
 *
 *
 * dst.memview = p_src.memview # <<<<<<<<<<<<<<
 * dst.data = p_src.data
 *
 */
/* Seed dst with the source's owning memoryview and base data pointer; the
 * per-dimension shape/strides/suboffsets are filled in by the loop below. */
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":722
 *
 * dst.memview = p_src.memview
 * dst.data = p_src.data # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":727
 *
 *
 * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
 * cdef int *p_suboffset_dim = &suboffset_dim
 * cdef Py_ssize_t start, stop, step
 */
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":728
 *
 * cdef __Pyx_memviewslice *p_dst = &dst
 * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
 * cdef Py_ssize_t start, stop, step
 * cdef bint have_start, have_stop, have_step
 */
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":732
 * cdef bint have_start, have_stop, have_step
 *
 * for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
 * if PyIndex_Check(index):
 * slice_memviewslice(
 */
/* enumerate(indices): __pyx_t_6 is the running `dim` counter; the list/tuple
 * fast path indexes directly, any other iterable goes through tp_iternext. */
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 732, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 732, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 732, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":733
 *
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index): # <<<<<<<<<<<<<<
 * slice_memviewslice(
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 */
/* Case 1: integer index -- collapses this dimension (is_slice=0, new_ndim
 * is NOT incremented). */
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":737
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 */
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 737, __pyx_L1_error)
/* "View.MemoryView":734
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index):
 * slice_memviewslice( # <<<<<<<<<<<<<<
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 */
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 734, __pyx_L1_error)
/* "View.MemoryView":733
 *
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index): # <<<<<<<<<<<<<<
 * slice_memviewslice(
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 */
goto __pyx_L6;
}
/* "View.MemoryView":740
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 * elif index is None: # <<<<<<<<<<<<<<
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 */
/* Case 2: None (newaxis) -- insert a direct length-1 dimension with zero
 * stride, then advance new_ndim. */
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":741
 * False)
 * elif index is None:
 * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1
 */
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":742
 * elif index is None:
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
 * p_dst.suboffsets[new_ndim] = -1
 * new_ndim += 1
 */
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":743
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
 * new_ndim += 1
 * else:
 */
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":744
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1
 * new_ndim += 1 # <<<<<<<<<<<<<<
 * else:
 * start = index.start or 0
 */
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":740
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 * elif index is None: # <<<<<<<<<<<<<<
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 */
goto __pyx_L6;
}
/* "View.MemoryView":746
 * new_ndim += 1
 * else:
 * start = index.start or 0 # <<<<<<<<<<<<<<
 * stop = index.stop or 0
 * step = index.step or 0
 */
/* Case 3: slice object -- extract start/stop/step.  Note the `or 0`
 * semantics: a falsy attribute (None OR the integer 0) yields 0, so the
 * separate have_* flags below carry the "was it None?" information. */
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 746, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":747
 * else:
 * start = index.start or 0
 * stop = index.stop or 0 # <<<<<<<<<<<<<<
 * step = index.step or 0
 *
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 747, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 747, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":748
 * start = index.start or 0
 * stop = index.stop or 0
 * step = index.step or 0 # <<<<<<<<<<<<<<
 *
 * have_start = index.start is not None
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":750
 * step = index.step or 0
 *
 * have_start = index.start is not None # <<<<<<<<<<<<<<
 * have_stop = index.stop is not None
 * have_step = index.step is not None
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":751
 *
 * have_start = index.start is not None
 * have_stop = index.stop is not None # <<<<<<<<<<<<<<
 * have_step = index.step is not None
 *
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 751, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":752
 * have_start = index.start is not None
 * have_stop = index.stop is not None
 * have_step = index.step is not None # <<<<<<<<<<<<<<
 *
 * slice_memviewslice(
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":754
 * have_step = index.step is not None
 *
 * slice_memviewslice( # <<<<<<<<<<<<<<
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 */
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 754, __pyx_L1_error)
/* "View.MemoryView":760
 * have_start, have_stop, have_step,
 * True)
 * new_ndim += 1 # <<<<<<<<<<<<<<
 *
 * if isinstance(memview, _memoryviewslice):
 */
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":732
 * cdef bint have_start, have_stop, have_step
 *
 * for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
 * if PyIndex_Check(index):
 * slice_memviewslice(
 */
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":762
 * new_ndim += 1
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 */
/* Wrap dst: a _memoryviewslice source propagates its object/dtype
 * conversion callbacks to the new view; a plain memoryview passes NULLs. */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":763
 *
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func,
 */
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":764
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
 * memviewsliceobj.to_dtype_func,
 * memview.dtype_is_object)
 */
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 764, __pyx_L1_error) }
/* "View.MemoryView":765
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 * else:
 */
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 765, __pyx_L1_error) }
/* "View.MemoryView":763
 *
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func,
 */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 763, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 763, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":762
 * new_ndim += 1
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 */
}
/* "View.MemoryView":768
 * memview.dtype_is_object)
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 *
 */
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":769
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL,
 * memview.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 768, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":768
 * memview.dtype_is_object)
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 *
 */
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 768, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":696
 *
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
 * cdef int new_ndim = 0, suboffset_dim = -1, dim
 * cdef bint negative_step
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":793
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* NOTE(review): Cython-generated helper (from View.MemoryView, .pyx line 793) —
 * do not hand-edit if this file is regenerated by Cython.
 *
 * Applies one index or slice specification along dimension `dim` of the source
 * to destination slice `*dst`:
 *   - is_slice == 0: `start` is a plain index; it is normalised against
 *     `shape` (negative wrap-around) and bounds-checked.
 *   - is_slice != 0: start/stop/step are clipped Python-slice style (the
 *     have_* flags mark which were given explicitly), then the resulting
 *     stride/shape/suboffset are written into entry `new_ndim` of dst.
 * Finally the data pointer (or the pending suboffset recorded via
 * `suboffset_dim`) is advanced by start*stride, and indirect (suboffset >= 0)
 * dimensions are either followed (ndim-0 index) or flagged.
 *
 * Returns 0 on success, -1 after an exception has been raised via
 * __pyx_memoryview_err_dim (the error path re-acquires the GIL — this
 * function runs nogil). */
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
  Py_ssize_t __pyx_v_new_shape;
  int __pyx_v_negative_step;
  int __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  /* "View.MemoryView":813
 *     cdef bint negative_step
 * 
 *     if not is_slice:             # <<<<<<<<<<<<<<
 * 
 *         if start < 0:
 */
  __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":815
 *     if not is_slice:
 * 
 *         if start < 0:             # <<<<<<<<<<<<<<
 *             start += shape
 *         if not 0 <= start < shape:
 */
    __pyx_t_1 = ((__pyx_v_start < 0) != 0);
    if (__pyx_t_1) {
      /* "View.MemoryView":816
 * 
 *         if start < 0:
 *             start += shape             # <<<<<<<<<<<<<<
 *         if not 0 <= start < shape:
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 */
      __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
      /* "View.MemoryView":815
 *     if not is_slice:
 * 
 *         if start < 0:             # <<<<<<<<<<<<<<
 *             start += shape
 *         if not 0 <= start < shape:
 */
    }
    /* "View.MemoryView":817
 *         if start < 0:
 *             start += shape
 *         if not 0 <= start < shape:             # <<<<<<<<<<<<<<
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 *     else:
 */
    __pyx_t_1 = (0 <= __pyx_v_start);
    if (__pyx_t_1) {
      __pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
    }
    __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":818
 *             start += shape
 *         if not 0 <= start < shape:
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)             # <<<<<<<<<<<<<<
 *     else:
 * 
 */
      __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 818, __pyx_L1_error)
      /* "View.MemoryView":817
 *         if start < 0:
 *             start += shape
 *         if not 0 <= start < shape:             # <<<<<<<<<<<<<<
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 *     else:
 */
    }
    /* "View.MemoryView":813
 *     cdef bint negative_step
 * 
 *     if not is_slice:             # <<<<<<<<<<<<<<
 * 
 *         if start < 0:
 */
    goto __pyx_L3;
  }
  /* "View.MemoryView":821
 *     else:
 * 
 *         negative_step = have_step != 0 and step < 0             # <<<<<<<<<<<<<<
 * 
 *         if have_step and step == 0:
 */
  /*else*/ {
    __pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
    if (__pyx_t_1) {
    } else {
      __pyx_t_2 = __pyx_t_1;
      goto __pyx_L6_bool_binop_done;
    }
    __pyx_t_1 = ((__pyx_v_step < 0) != 0);
    __pyx_t_2 = __pyx_t_1;
    __pyx_L6_bool_binop_done:;
    __pyx_v_negative_step = __pyx_t_2;
    /* "View.MemoryView":823
 *         negative_step = have_step != 0 and step < 0
 * 
 *         if have_step and step == 0:             # <<<<<<<<<<<<<<
 *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
 * 
 */
    __pyx_t_1 = (__pyx_v_have_step != 0);
    if (__pyx_t_1) {
    } else {
      __pyx_t_2 = __pyx_t_1;
      goto __pyx_L9_bool_binop_done;
    }
    __pyx_t_1 = ((__pyx_v_step == 0) != 0);
    __pyx_t_2 = __pyx_t_1;
    __pyx_L9_bool_binop_done:;
    if (__pyx_t_2) {
      /* "View.MemoryView":824
 * 
 *         if have_step and step == 0:
 *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)             # <<<<<<<<<<<<<<
 * 
 * 
 */
      __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 824, __pyx_L1_error)
      /* "View.MemoryView":823
 *         negative_step = have_step != 0 and step < 0
 * 
 *         if have_step and step == 0:             # <<<<<<<<<<<<<<
 *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
 * 
 */
    }
    /* Clip `start` into [0, shape] (or [-?, shape-1] for negative step),
     * mirroring CPython's PySlice_GetIndicesEx semantics. */
    /* "View.MemoryView":827
 * 
 * 
 *         if have_start:             # <<<<<<<<<<<<<<
 *             if start < 0:
 *                 start += shape
 */
    __pyx_t_2 = (__pyx_v_have_start != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":828
 * 
 *         if have_start:
 *             if start < 0:             # <<<<<<<<<<<<<<
 *                 start += shape
 *                 if start < 0:
 */
      __pyx_t_2 = ((__pyx_v_start < 0) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":829
 *         if have_start:
 *             if start < 0:
 *                 start += shape             # <<<<<<<<<<<<<<
 *                 if start < 0:
 *                     start = 0
 */
        __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
        /* "View.MemoryView":830
 *             if start < 0:
 *                 start += shape
 *                 if start < 0:             # <<<<<<<<<<<<<<
 *                     start = 0
 *             elif start >= shape:
 */
        __pyx_t_2 = ((__pyx_v_start < 0) != 0);
        if (__pyx_t_2) {
          /* "View.MemoryView":831
 *                 start += shape
 *                 if start < 0:
 *                     start = 0             # <<<<<<<<<<<<<<
 *             elif start >= shape:
 *                 if negative_step:
 */
          __pyx_v_start = 0;
          /* "View.MemoryView":830
 *             if start < 0:
 *                 start += shape
 *                 if start < 0:             # <<<<<<<<<<<<<<
 *                     start = 0
 *             elif start >= shape:
 */
        }
        /* "View.MemoryView":828
 * 
 *         if have_start:
 *             if start < 0:             # <<<<<<<<<<<<<<
 *                 start += shape
 *                 if start < 0:
 */
        goto __pyx_L12;
      }
      /* "View.MemoryView":832
 *                 if start < 0:
 *                     start = 0
 *             elif start >= shape:             # <<<<<<<<<<<<<<
 *                 if negative_step:
 *                     start = shape - 1
 */
      __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":833
 *                     start = 0
 *             elif start >= shape:
 *                 if negative_step:             # <<<<<<<<<<<<<<
 *                     start = shape - 1
 *                 else:
 */
        __pyx_t_2 = (__pyx_v_negative_step != 0);
        if (__pyx_t_2) {
          /* "View.MemoryView":834
 *             elif start >= shape:
 *                 if negative_step:
 *                     start = shape - 1             # <<<<<<<<<<<<<<
 *                 else:
 *                     start = shape
 */
          __pyx_v_start = (__pyx_v_shape - 1);
          /* "View.MemoryView":833
 *                     start = 0
 *             elif start >= shape:
 *                 if negative_step:             # <<<<<<<<<<<<<<
 *                     start = shape - 1
 *                 else:
 */
          goto __pyx_L14;
        }
        /* "View.MemoryView":836
 *                     start = shape - 1
 *                 else:
 *                     start = shape             # <<<<<<<<<<<<<<
 *             else:
 *                 if negative_step:
 */
        /*else*/ {
          __pyx_v_start = __pyx_v_shape;
        }
        __pyx_L14:;
        /* "View.MemoryView":832
 *                 if start < 0:
 *                     start = 0
 *             elif start >= shape:             # <<<<<<<<<<<<<<
 *                 if negative_step:
 *                     start = shape - 1
 */
      }
      __pyx_L12:;
      /* "View.MemoryView":827
 * 
 * 
 *         if have_start:             # <<<<<<<<<<<<<<
 *             if start < 0:
 *                 start += shape
 */
      goto __pyx_L11;
    }
    /* "View.MemoryView":838
 *                     start = shape
 *         else:
 *             if negative_step:             # <<<<<<<<<<<<<<
 *                 start = shape - 1
 *             else:
 */
    /*else*/ {
      __pyx_t_2 = (__pyx_v_negative_step != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":839
 *         else:
 *             if negative_step:
 *                 start = shape - 1             # <<<<<<<<<<<<<<
 *             else:
 *                 start = 0
 */
        __pyx_v_start = (__pyx_v_shape - 1);
        /* "View.MemoryView":838
 *                     start = shape
 *         else:
 *             if negative_step:             # <<<<<<<<<<<<<<
 *                 start = shape - 1
 *             else:
 */
        goto __pyx_L15;
      }
      /* "View.MemoryView":841
 *                 start = shape - 1
 *             else:
 *                 start = 0             # <<<<<<<<<<<<<<
 * 
 *         if have_stop:
 */
      /*else*/ {
        __pyx_v_start = 0;
      }
      __pyx_L15:;
    }
    __pyx_L11:;
    /* Clip `stop` the same way; note stop = -1 (not 0) is the exclusive
     * lower bound for a negative step. */
    /* "View.MemoryView":843
 *                 start = 0
 * 
 *         if have_stop:             # <<<<<<<<<<<<<<
 *             if stop < 0:
 *                 stop += shape
 */
    __pyx_t_2 = (__pyx_v_have_stop != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":844
 * 
 *         if have_stop:
 *             if stop < 0:             # <<<<<<<<<<<<<<
 *                 stop += shape
 *                 if stop < 0:
 */
      __pyx_t_2 = ((__pyx_v_stop < 0) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":845
 *         if have_stop:
 *             if stop < 0:
 *                 stop += shape             # <<<<<<<<<<<<<<
 *                 if stop < 0:
 *                     stop = 0
 */
        __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
        /* "View.MemoryView":846
 *             if stop < 0:
 *                 stop += shape
 *                 if stop < 0:             # <<<<<<<<<<<<<<
 *                     stop = 0
 *             elif stop > shape:
 */
        __pyx_t_2 = ((__pyx_v_stop < 0) != 0);
        if (__pyx_t_2) {
          /* "View.MemoryView":847
 *                 stop += shape
 *                 if stop < 0:
 *                     stop = 0             # <<<<<<<<<<<<<<
 *             elif stop > shape:
 *                 stop = shape
 */
          __pyx_v_stop = 0;
          /* "View.MemoryView":846
 *             if stop < 0:
 *                 stop += shape
 *                 if stop < 0:             # <<<<<<<<<<<<<<
 *                     stop = 0
 *             elif stop > shape:
 */
        }
        /* "View.MemoryView":844
 * 
 *         if have_stop:
 *             if stop < 0:             # <<<<<<<<<<<<<<
 *                 stop += shape
 *                 if stop < 0:
 */
        goto __pyx_L17;
      }
      /* "View.MemoryView":848
 *                 if stop < 0:
 *                     stop = 0
 *             elif stop > shape:             # <<<<<<<<<<<<<<
 *                 stop = shape
 *         else:
 */
      __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":849
 *                     stop = 0
 *             elif stop > shape:
 *                 stop = shape             # <<<<<<<<<<<<<<
 *         else:
 *             if negative_step:
 */
        __pyx_v_stop = __pyx_v_shape;
        /* "View.MemoryView":848
 *                 if stop < 0:
 *                     stop = 0
 *             elif stop > shape:             # <<<<<<<<<<<<<<
 *                 stop = shape
 *         else:
 */
      }
      __pyx_L17:;
      /* "View.MemoryView":843
 *                 start = 0
 * 
 *         if have_stop:             # <<<<<<<<<<<<<<
 *             if stop < 0:
 *                 stop += shape
 */
      goto __pyx_L16;
    }
    /* "View.MemoryView":851
 *                 stop = shape
 *         else:
 *             if negative_step:             # <<<<<<<<<<<<<<
 *                 stop = -1
 *             else:
 */
    /*else*/ {
      __pyx_t_2 = (__pyx_v_negative_step != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":852
 *         else:
 *             if negative_step:
 *                 stop = -1             # <<<<<<<<<<<<<<
 *             else:
 *                 stop = shape
 */
        __pyx_v_stop = -1L;
        /* "View.MemoryView":851
 *                 stop = shape
 *         else:
 *             if negative_step:             # <<<<<<<<<<<<<<
 *                 stop = -1
 *             else:
 */
        goto __pyx_L19;
      }
      /* "View.MemoryView":854
 *                 stop = -1
 *             else:
 *                 stop = shape             # <<<<<<<<<<<<<<
 * 
 *         if not have_step:
 */
      /*else*/ {
        __pyx_v_stop = __pyx_v_shape;
      }
      __pyx_L19:;
    }
    __pyx_L16:;
    /* "View.MemoryView":856
 *                 stop = shape
 * 
 *         if not have_step:             # <<<<<<<<<<<<<<
 *             step = 1
 * 
 */
    __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":857
 * 
 *         if not have_step:
 *             step = 1             # <<<<<<<<<<<<<<
 * 
 * 
 */
      __pyx_v_step = 1;
      /* "View.MemoryView":856
 *                 stop = shape
 * 
 *         if not have_step:             # <<<<<<<<<<<<<<
 *             step = 1
 * 
 */
    }
    /* new_shape = ceil((stop-start)/step); plain C division here is the
     * cython.cdivision(True) translation, the remainder check below adds
     * the partial final element. */
    /* "View.MemoryView":861
 * 
 *         with cython.cdivision(True):
 *             new_shape = (stop - start) // step             # <<<<<<<<<<<<<<
 * 
 *             if (stop - start) - step * new_shape:
 */
    __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
    /* "View.MemoryView":863
 *             new_shape = (stop - start) // step
 * 
 *             if (stop - start) - step * new_shape:             # <<<<<<<<<<<<<<
 *                 new_shape += 1
 * 
 */
    __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":864
 * 
 *             if (stop - start) - step * new_shape:
 *                 new_shape += 1             # <<<<<<<<<<<<<<
 * 
 *             if new_shape < 0:
 */
      __pyx_v_new_shape = (__pyx_v_new_shape + 1);
      /* "View.MemoryView":863
 *             new_shape = (stop - start) // step
 * 
 *             if (stop - start) - step * new_shape:             # <<<<<<<<<<<<<<
 *                 new_shape += 1
 * 
 */
    }
    /* "View.MemoryView":866
 *                 new_shape += 1
 * 
 *             if new_shape < 0:             # <<<<<<<<<<<<<<
 *                 new_shape = 0
 * 
 */
    __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":867
 * 
 *             if new_shape < 0:
 *                 new_shape = 0             # <<<<<<<<<<<<<<
 * 
 * 
 */
      __pyx_v_new_shape = 0;
      /* "View.MemoryView":866
 *                 new_shape += 1
 * 
 *             if new_shape < 0:             # <<<<<<<<<<<<<<
 *                 new_shape = 0
 * 
 */
    }
    /* Record the computed extent for this (kept) dimension in dst. */
    /* "View.MemoryView":870
 * 
 * 
 *             dst.strides[new_ndim] = stride * step             # <<<<<<<<<<<<<<
 *             dst.shape[new_ndim] = new_shape
 *             dst.suboffsets[new_ndim] = suboffset
 */
    (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
    /* "View.MemoryView":871
 * 
 *             dst.strides[new_ndim] = stride * step
 *             dst.shape[new_ndim] = new_shape             # <<<<<<<<<<<<<<
 *             dst.suboffsets[new_ndim] = suboffset
 * 
 */
    (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
    /* "View.MemoryView":872
 *             dst.strides[new_ndim] = stride * step
 *             dst.shape[new_ndim] = new_shape
 *             dst.suboffsets[new_ndim] = suboffset             # <<<<<<<<<<<<<<
 * 
 * 
 */
    (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
  }
  __pyx_L3:;
  /* Advance either the base data pointer or, if an indirect dimension was
   * already seen (suboffset_dim[0] >= 0), that dimension's suboffset. */
  /* "View.MemoryView":875
 * 
 * 
 *     if suboffset_dim[0] < 0:             # <<<<<<<<<<<<<<
 *         dst.data += start * stride
 *     else:
 */
  __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":876
 * 
 *     if suboffset_dim[0] < 0:
 *         dst.data += start * stride             # <<<<<<<<<<<<<<
 *     else:
 *         dst.suboffsets[suboffset_dim[0]] += start * stride
 */
    __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
    /* "View.MemoryView":875
 * 
 * 
 *     if suboffset_dim[0] < 0:             # <<<<<<<<<<<<<<
 *         dst.data += start * stride
 *     else:
 */
    goto __pyx_L23;
  }
  /* "View.MemoryView":878
 *         dst.data += start * stride
 *     else:
 *         dst.suboffsets[suboffset_dim[0]] += start * stride             # <<<<<<<<<<<<<<
 * 
 *     if suboffset >= 0:
 */
  /*else*/ {
    __pyx_t_3 = (__pyx_v_suboffset_dim[0]);
    (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
  }
  __pyx_L23:;
  /* "View.MemoryView":880
 *         dst.suboffsets[suboffset_dim[0]] += start * stride
 * 
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         if not is_slice:
 *             if new_ndim == 0:
 */
  __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":881
 * 
 *     if suboffset >= 0:
 *         if not is_slice:             # <<<<<<<<<<<<<<
 *             if new_ndim == 0:
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 */
    __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":882
 *     if suboffset >= 0:
 *         if not is_slice:
 *             if new_ndim == 0:             # <<<<<<<<<<<<<<
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 *             else:
 */
      __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
      if (__pyx_t_2) {
        /* Dereference the indirect pointer now that all leading dims are
         * indexed away (PEP 3118 suboffset semantics). */
        /* "View.MemoryView":883
 *         if not is_slice:
 *             if new_ndim == 0:
 *                 dst.data = (<char **> dst.data)[0] + suboffset             # <<<<<<<<<<<<<<
 *             else:
 *                 _err_dim(IndexError, "All dimensions preceding dimension %d "
 */
        __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
        /* "View.MemoryView":882
 *     if suboffset >= 0:
 *         if not is_slice:
 *             if new_ndim == 0:             # <<<<<<<<<<<<<<
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 *             else:
 */
        goto __pyx_L26;
      }
      /* "View.MemoryView":885
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 *             else:
 *                 _err_dim(IndexError, "All dimensions preceding dimension %d "             # <<<<<<<<<<<<<<
 *                          "must be indexed and not sliced", dim)
 *         else:
 */
      /*else*/ {
        /* "View.MemoryView":886
 *             else:
 *                 _err_dim(IndexError, "All dimensions preceding dimension %d "
 *                          "must be indexed and not sliced", dim)             # <<<<<<<<<<<<<<
 *         else:
 *             suboffset_dim[0] = new_ndim
 */
        __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 885, __pyx_L1_error)
      }
      __pyx_L26:;
      /* "View.MemoryView":881
 * 
 *     if suboffset >= 0:
 *         if not is_slice:             # <<<<<<<<<<<<<<
 *             if new_ndim == 0:
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 */
      goto __pyx_L25;
    }
    /* "View.MemoryView":888
 *                          "must be indexed and not sliced", dim)
 *         else:
 *             suboffset_dim[0] = new_ndim             # <<<<<<<<<<<<<<
 * 
 *     return 0
 */
    /*else*/ {
      (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
    }
    __pyx_L25:;
    /* "View.MemoryView":880
 *         dst.suboffsets[suboffset_dim[0]] += start * stride
 * 
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         if not is_slice:
 *             if new_ndim == 0:
 */
  }
  /* "View.MemoryView":890
 *             suboffset_dim[0] = new_ndim
 * 
 *     return 0             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = 0;
  goto __pyx_L0;
  /* "View.MemoryView":793
 * 
 * @cname('__pyx_memoryview_slice_memviewslice')
 * cdef int slice_memviewslice(             # <<<<<<<<<<<<<<
 *         __Pyx_memviewslice *dst,
 *         Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
 */
  /* function exit code */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":896
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* NOTE(review): Cython-generated helper (View.MemoryView, .pyx line 896).
 * Computes the address of element `index` along dimension `dim` of the
 * Py_buffer `view`, starting from base pointer `bufp`:
 *   - ndim == 0 buffers are treated as a flat array (shape = len/itemsize,
 *     stride = itemsize);
 *   - negative indices wrap once; out-of-bounds raises IndexError;
 *   - a non-negative suboffset (PEP 3118 indirect dimension) is followed by
 *     one pointer dereference.
 * Returns the element pointer, or NULL after raising (declared
 * `except NULL` in the .pyx source). */
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
  Py_ssize_t __pyx_v_shape;
  Py_ssize_t __pyx_v_stride;
  Py_ssize_t __pyx_v_suboffset;
  Py_ssize_t __pyx_v_itemsize;
  char *__pyx_v_resultp;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  __Pyx_RefNannySetupContext("pybuffer_index", 0);
  /* "View.MemoryView":898
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t itemsize = view.itemsize
 *     cdef char *resultp
 */
  __pyx_v_suboffset = -1L;
  /* "View.MemoryView":899
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1
 *     cdef Py_ssize_t itemsize = view.itemsize             # <<<<<<<<<<<<<<
 *     cdef char *resultp
 * 
 */
  __pyx_t_1 = __pyx_v_view->itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  /* "View.MemoryView":902
 *     cdef char *resultp
 * 
 *     if view.ndim == 0:             # <<<<<<<<<<<<<<
 *         shape = view.len / itemsize
 *         stride = itemsize
 */
  __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
  if (__pyx_t_2) {
    /* Guarded Python-style floor division: itemsize == 0 and the
     * len == PY_SSIZE_T_MIN / itemsize == -1 overflow case both raise. */
    /* "View.MemoryView":903
 * 
 *     if view.ndim == 0:
 *         shape = view.len / itemsize             # <<<<<<<<<<<<<<
 *         stride = itemsize
 *     else:
 */
    if (unlikely(__pyx_v_itemsize == 0)) {
      PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
      __PYX_ERR(1, 903, __pyx_L1_error)
    }
    else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1)  && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
      PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
      __PYX_ERR(1, 903, __pyx_L1_error)
    }
    __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
    /* "View.MemoryView":904
 *     if view.ndim == 0:
 *         shape = view.len / itemsize
 *         stride = itemsize             # <<<<<<<<<<<<<<
 *     else:
 *         shape = view.shape[dim]
 */
    __pyx_v_stride = __pyx_v_itemsize;
    /* "View.MemoryView":902
 *     cdef char *resultp
 * 
 *     if view.ndim == 0:             # <<<<<<<<<<<<<<
 *         shape = view.len / itemsize
 *         stride = itemsize
 */
    goto __pyx_L3;
  }
  /* "View.MemoryView":906
 *         stride = itemsize
 *     else:
 *         shape = view.shape[dim]             # <<<<<<<<<<<<<<
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:
 */
  /*else*/ {
    __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
    /* "View.MemoryView":907
 *     else:
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]             # <<<<<<<<<<<<<<
 *         if view.suboffsets != NULL:
 *             suboffset = view.suboffsets[dim]
 */
    __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
    /* "View.MemoryView":908
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *             suboffset = view.suboffsets[dim]
 * 
 */
    __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":909
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:
 *             suboffset = view.suboffsets[dim]             # <<<<<<<<<<<<<<
 * 
 *     if index < 0:
 */
      __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
      /* "View.MemoryView":908
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *             suboffset = view.suboffsets[dim]
 * 
 */
    }
  }
  __pyx_L3:;
  /* "View.MemoryView":911
 *             suboffset = view.suboffsets[dim]
 * 
 *     if index < 0:             # <<<<<<<<<<<<<<
 *         index += view.shape[dim]
 *         if index < 0:
 */
  __pyx_t_2 = ((__pyx_v_index < 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":912
 * 
 *     if index < 0:
 *         index += view.shape[dim]             # <<<<<<<<<<<<<<
 *         if index < 0:
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 */
    __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
    /* "View.MemoryView":913
 *     if index < 0:
 *         index += view.shape[dim]
 *         if index < 0:             # <<<<<<<<<<<<<<
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 */
    __pyx_t_2 = ((__pyx_v_index < 0) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":914
 *         index += view.shape[dim]
 *         if index < 0:
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)             # <<<<<<<<<<<<<<
 * 
 *     if index >= shape:
 */
      __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_GIVEREF(__pyx_t_4);
      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
      __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __PYX_ERR(1, 914, __pyx_L1_error)
      /* "View.MemoryView":913
 *     if index < 0:
 *         index += view.shape[dim]
 *         if index < 0:             # <<<<<<<<<<<<<<
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 */
    }
    /* "View.MemoryView":911
 *             suboffset = view.suboffsets[dim]
 * 
 *     if index < 0:             # <<<<<<<<<<<<<<
 *         index += view.shape[dim]
 *         if index < 0:
 */
  }
  /* "View.MemoryView":916
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 *     if index >= shape:             # <<<<<<<<<<<<<<
 *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 */
  __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":917
 * 
 *     if index >= shape:
 *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)             # <<<<<<<<<<<<<<
 * 
 *     resultp = bufp + index * stride
 */
    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
    __pyx_t_3 = 0;
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 917, __pyx_L1_error)
    /* "View.MemoryView":916
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 *     if index >= shape:             # <<<<<<<<<<<<<<
 *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 */
  }
  /* "View.MemoryView":919
 *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 *     resultp = bufp + index * stride             # <<<<<<<<<<<<<<
 *     if suboffset >= 0:
 *         resultp = (<char **> resultp)[0] + suboffset
 */
  __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
  /* "View.MemoryView":920
 * 
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
 */
  __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
  if (__pyx_t_2) {
    /* Indirect dimension: the slot holds a pointer; follow it and add the
     * suboffset (PEP 3118). */
    /* "View.MemoryView":921
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:
 *         resultp = (<char **> resultp)[0] + suboffset             # <<<<<<<<<<<<<<
 * 
 *     return resultp
 */
    __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
    /* "View.MemoryView":920
 * 
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
 */
  }
  /* "View.MemoryView":923
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
 *     return resultp             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_resultp;
  goto __pyx_L0;
  /* "View.MemoryView":896
 * 
 * @cname('__pyx_pybuffer_index')
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,             # <<<<<<<<<<<<<<
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":929
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* NOTE(review): Cython-generated helper (View.MemoryView, .pyx line 929).
 * Transposes `*memslice` in place by swapping shape[i]/shape[j] and
 * strides[i]/strides[j] for i in [0, ndim/2), j = ndim-1-i.  Raises
 * ValueError if either swapped dimension is indirect (suboffset >= 0),
 * since suboffsets are not swapped here.  Declared `nogil except 0`:
 * returns 1 on success, 0 after raising (GIL re-acquired in the error
 * path only). */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
  int __pyx_v_ndim;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  int __pyx_v_i;
  int __pyx_v_j;
  int __pyx_r;
  int __pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  long __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  /* "View.MemoryView":930
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
 *     cdef int ndim = memslice.memview.view.ndim             # <<<<<<<<<<<<<<
 * 
 *     cdef Py_ssize_t *shape = memslice.shape
 */
  __pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
  __pyx_v_ndim = __pyx_t_1;
  /* "View.MemoryView":932
 *     cdef int ndim = memslice.memview.view.ndim
 * 
 *     cdef Py_ssize_t *shape = memslice.shape             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t *strides = memslice.strides
 * 
 */
  __pyx_t_2 = __pyx_v_memslice->shape;
  __pyx_v_shape = __pyx_t_2;
  /* "View.MemoryView":933
 * 
 *     cdef Py_ssize_t *shape = memslice.shape
 *     cdef Py_ssize_t *strides = memslice.strides             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_v_memslice->strides;
  __pyx_v_strides = __pyx_t_2;
  /* Walk the first half of the axes, mirror-swapping with the second half;
   * the middle axis of an odd ndim stays put. */
  /* "View.MemoryView":937
 * 
 *     cdef int i, j
 *     for i in range(ndim / 2):             # <<<<<<<<<<<<<<
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 */
  __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
  for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) {
    __pyx_v_i = __pyx_t_1;
    /* "View.MemoryView":938
 *     cdef int i, j
 *     for i in range(ndim / 2):
 *         j = ndim - 1 - i             # <<<<<<<<<<<<<<
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]
 */
    __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
    /* "View.MemoryView":939
 *     for i in range(ndim / 2):
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]             # <<<<<<<<<<<<<<
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 */
    __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]);
    __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]);
    (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4;
    (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5;
    /* "View.MemoryView":940
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]             # <<<<<<<<<<<<<<
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 */
    __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]);
    __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]);
    (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5;
    (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4;
    /* "View.MemoryView":942
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 */
    __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
    if (!__pyx_t_7) {
    } else {
      __pyx_t_6 = __pyx_t_7;
      goto __pyx_L6_bool_binop_done;
    }
    __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
    __pyx_t_6 = __pyx_t_7;
    __pyx_L6_bool_binop_done:;
    if (__pyx_t_6) {
      /* "View.MemoryView":943
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")             # <<<<<<<<<<<<<<
 * 
 *     return 1
 */
      __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 943, __pyx_L1_error)
      /* "View.MemoryView":942
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 */
    }
  }
  /* "View.MemoryView":945
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 *     return 1             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = 1;
  goto __pyx_L0;
  /* "View.MemoryView":929
 * 
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:             # <<<<<<<<<<<<<<
 *     cdef int ndim = memslice.memview.view.ndim
 * 
 */
  /* function exit code */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":962
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
/* tp_dealloc wrapper for _memoryviewslice: casts the generic PyObject* to
 * the concrete struct and delegates to the generated implementation below. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* __dealloc__ implementation for _memoryviewslice: releases the reference
 * held on the source slice via __PYX_XDEC_MEMVIEW (second arg 1 = have GIL,
 * per the generated call in View.MemoryView line 963). */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__", 0);
  /* "View.MemoryView":963
 * 
 *     def __dealloc__(self):
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)             # <<<<<<<<<<<<<<
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 */
  __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
  /* "View.MemoryView":962
 *     cdef int (*to_dtype_func)(char *, object) except 0
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 * 
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":965
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* NOTE(review): Cython-generated override of memoryview.convert_item_to_object
 * for _memoryviewslice (View.MemoryView, .pyx line 965).  Converts the raw
 * element at `itemp` to a Python object: uses the slice's cached
 * to_object_func when one was installed, otherwise falls back to the base
 * memoryview implementation.  Returns a new reference, or 0/NULL after
 * raising. */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  __Pyx_RefNannySetupContext("convert_item_to_object", 0);
  /* "View.MemoryView":966
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 *             return self.to_object_func(itemp)
 *         else:
 */
  __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":967
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)             # <<<<<<<<<<<<<<
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 967, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
    /* "View.MemoryView":966
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 *             return self.to_object_func(itemp)
 *         else:
 */
  }
  /* "View.MemoryView":969
 *             return self.to_object_func(itemp)
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)             # <<<<<<<<<<<<<<
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }
  /* "View.MemoryView":965
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 * 
 *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":971
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* NOTE(review): Cython-generated override of memoryview.assign_item_from_object
 * for _memoryviewslice (View.MemoryView, .pyx line 971).  Writes Python
 * object `value` into the raw element at `itemp`: uses the slice's cached
 * to_dtype_func when installed (declared `except 0`, hence the == 0 error
 * check), otherwise delegates to the base memoryview implementation.
 * Returns None on success, 0/NULL after raising. */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  __Pyx_RefNannySetupContext("assign_item_from_object", 0);
  /* "View.MemoryView":972
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 *             self.to_dtype_func(itemp, value)
 *         else:
 */
  __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":973
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)             # <<<<<<<<<<<<<<
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)
 */
    __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 973, __pyx_L1_error)
    /* "View.MemoryView":972
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 *             self.to_dtype_func(itemp, value)
 *         else:
 */
    goto __pyx_L3;
  }
  /* "View.MemoryView":975
 *             self.to_dtype_func(itemp, value)
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  /*else*/ {
    __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 975, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  }
  __pyx_L3:;
  /* "View.MemoryView":971
 *             return memoryview.convert_item_to_object(self, itemp)
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)
 */
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":978
 *
 * @property
 * def base(self): # <<<<<<<<<<<<<<
 * return self.from_object
 *
 */
/* Python wrapper */
/* CPython getter entry point for `_memoryviewslice.base`; only casts
 * `self` and forwards to the implementation function below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of the `base` property: returns a new reference to the
 * object the slice was originally created from (self.from_object). */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":979
 * @property
 * def base(self):
 * return self.from_object # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":978
 *
 * @property
 * def base(self): # <<<<<<<<<<<<<<
 * return self.from_object
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
 * int ndim,
 * object (*to_object_func)(char *),
 */
/* Wrap a C-level __Pyx_memviewslice in a new Python _memoryviewslice object.
 * Returns None when the slice's backing memview is None.  Otherwise builds a
 * _memoryviewslice, copies the slice in (taking an extra reference via
 * __PYX_INC_MEMVIEW), points the object's Py_buffer view at the slice's
 * data/shape/strides (and suboffsets only if any dimension is indirect),
 * recomputes view.len, and stores the dtype conversion callbacks.
 * Returns 0 (NULL) with an exception set on error. */
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":993
 * cdef _memoryviewslice result
 *
 * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
 * return None
 *
 */
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":994
 *
 * if <PyObject *> memviewslice.memview == Py_None:
 * return None # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
/* "View.MemoryView":993
 * cdef _memoryviewslice result
 *
 * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
 * return None
 *
 */
}
/* "View.MemoryView":999
 *
 *
 * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
 *
 * result.from_slice = memviewslice
 */
/* Build the 3-tuple (None, 0, dtype_is_object) and call the
 * _memoryviewslice type to allocate the result object. */
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 999, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1001
 * result = _memoryviewslice(None, 0, dtype_is_object)
 *
 * result.from_slice = memviewslice # <<<<<<<<<<<<<<
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 */
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1002
 *
 * result.from_slice = memviewslice
 * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 */
/* The result now holds its own reference to the slice's memview. */
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1004
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 */
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1004, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1005
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
 *
 * result.view = memviewslice.memview.view
 */
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1007
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 * result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 */
/* Start from a struct copy of the parent's Py_buffer, then patch the
 * fields that differ for this slice (buf, ndim, obj, shape, strides,
 * suboffsets, len). */
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1008
 *
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 */
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1009
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim # <<<<<<<<<<<<<<
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None)
 */
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1010
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
/* view.obj was copied from the parent's buffer; overwrite it with an
 * owned reference to None so the copy does not alias the parent's owner. */
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1011
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
 *
 * result.flags = PyBUF_RECORDS
 */
Py_INCREF(Py_None);
/* "View.MemoryView":1013
 * Py_INCREF(Py_None)
 *
 * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 */
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1015
 * result.flags = PyBUF_RECORDS
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides
 *
 */
/* shape/strides point into result.from_slice (owned by result), so their
 * lifetime matches the view's. */
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1016
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1019
 *
 *
 * result.view.suboffsets = NULL # <<<<<<<<<<<<<<
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0:
 */
/* Expose suboffsets only when at least one dimension is indirect
 * (suboffset >= 0); otherwise leave the buffer "direct" (NULL). */
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1020
 *
 * result.view.suboffsets = NULL
 * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
 * if suboffset >= 0:
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 */
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1021
 * result.view.suboffsets = NULL
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 * break
 */
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1022
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0:
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
 * break
 *
 */
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1023
 * if suboffset >= 0:
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 * break # <<<<<<<<<<<<<<
 *
 * result.view.len = result.view.itemsize
 */
goto __pyx_L5_break;
/* "View.MemoryView":1021
 * result.view.suboffsets = NULL
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 * break
 */
}
}
__pyx_L5_break:;
/* "View.MemoryView":1025
 * break
 *
 * result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
 * for length in result.view.shape[:ndim]:
 * result.view.len *= length
 */
/* view.len = itemsize * product(shape[:ndim]); computed below through
 * Python integer objects (length is a PyObject). */
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1026
 *
 * result.view.len = result.view.itemsize
 * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
 * result.view.len *= length
 *
 */
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1026, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1027
 * result.view.len = result.view.itemsize
 * for length in result.view.shape[:ndim]:
 * result.view.len *= length # <<<<<<<<<<<<<<
 *
 * result.to_object_func = to_object_func
 */
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1027, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1027, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1027, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1029
 * result.view.len *= length
 *
 * result.to_object_func = to_object_func # <<<<<<<<<<<<<<
 * result.to_dtype_func = to_dtype_func
 *
 */
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1030
 *
 * result.to_object_func = to_object_func
 * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1032
 * result.to_dtype_func = to_dtype_func
 *
 * return result # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":985
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
 * int ndim,
 * object (*to_object_func)(char *),
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1035
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 */
/* Obtain a __Pyx_memviewslice describing `memview`.
 * If memview is already a _memoryviewslice, returns a pointer to its
 * embedded from_slice (no copy); otherwise fills the caller-provided
 * `mslice` via slice_copy and returns `mslice`.
 * NOTE(review): the returned pointer may alias the object's internals —
 * it is only valid while `memview` stays alive. */
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1038
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1039
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice):
 * obj = memview # <<<<<<<<<<<<<<
 * return &obj.from_slice
 * else:
 */
/* Type-checked downcast; cannot fail here since the branch already
 * established the type, but the generated check is kept. */
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1039, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1040
 * if isinstance(memview, _memoryviewslice):
 * obj = memview
 * return &obj.from_slice # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, mslice)
 */
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1038
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
}
/* "View.MemoryView":1042
 * return &obj.from_slice
 * else:
 * slice_copy(memview, mslice) # <<<<<<<<<<<<<<
 * return mslice
 *
 */
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1043
 * else:
 * slice_copy(memview, mslice)
 * return mslice # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_slice_copy')
 */
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1035
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 */
/* function exit code */
/* This function cannot propagate a Python exception (C return type), so
 * errors are reported as unraisable and 0 is returned. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1046 — cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst)
 *
 * Flatten the Py_buffer description of `memview` into the caller-provided
 * __Pyx_memviewslice `dst`: record the owning memview and data pointer,
 * then copy per-dimension shape, strides and suboffsets.  A NULL
 * suboffsets pointer in the buffer means "no indirect dimensions", which
 * is encoded as -1 in every slot of dst->suboffsets. */
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("slice_copy", 0);

/* Source descriptors straight from the buffer view. */
Py_ssize_t *src_shape = __pyx_v_memview->view.shape;
Py_ssize_t *src_strides = __pyx_v_memview->view.strides;
Py_ssize_t *src_suboffsets = __pyx_v_memview->view.suboffsets;

__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);

/* Copy each dimension's geometry into the fixed-size slice arrays. */
int ndim = __pyx_v_memview->view.ndim;
for (int dim = 0; dim < ndim; dim++) {
__pyx_v_dst->shape[dim] = src_shape[dim];
__pyx_v_dst->strides[dim] = src_strides[dim];
__pyx_v_dst->suboffsets[dim] = (src_suboffsets != NULL) ? src_suboffsets[dim] : -1;
}

__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1063
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
/* Create a new memoryview object duplicating `memview`: snapshot its
 * buffer into a stack __Pyx_memviewslice via slice_copy, then delegate to
 * memoryview_copy_from_slice.  Returns 0 (NULL) with an exception set on
 * error. */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1066
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
 * return memoryview_copy_from_slice(memview, &memviewslice)
 *
 */
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1067
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice)
 * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1067, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1063
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1070
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
/* Build a new memoryview object from `memview` and the given slice.
 * Propagates the dtype conversion callbacks (to_object_func /
 * to_dtype_func) when `memview` is itself a _memoryviewslice, otherwise
 * passes NULL for both, then delegates to memoryview_fromslice.
 * Returns 0 (NULL) with an exception set on error. */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1077
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1078
 *
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 */
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1079
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
 * else:
 * to_object_func = NULL
 */
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1077
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
goto __pyx_L3;
}
/* "View.MemoryView":1081
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 * to_object_func = NULL # <<<<<<<<<<<<<<
 * to_dtype_func = NULL
 *
 */
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1082
 * else:
 * to_object_func = NULL
 * to_dtype_func = NULL # <<<<<<<<<<<<<<
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 */
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1084
 * to_dtype_func = NULL
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object)
 */
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1086
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
/* Note: the slice struct is passed by value (memviewslice[0]). */
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1070
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1092 — cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil
 *
 * Absolute value for Py_ssize_t; pure C helper, safe without the GIL.
 * NOTE(review): negating PY_SSIZE_T_MIN would overflow — presumably the
 * callers (stride comparisons) never pass it; confirm upstream. */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
return (__pyx_v_arg < 0) ? (-__pyx_v_arg) : __pyx_v_arg;
}
/* "View.MemoryView":1099 — cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil
 *
 * Figure out the best memory access order for a given slice.
 * The candidate C-order stride is the stride of the last dimension with
 * extent > 1 (scanning backwards); the candidate Fortran-order stride is
 * the stride of the first such dimension (scanning forwards).  Whichever
 * has the smaller magnitude indicates the contiguity direction; ties
 * (including the all-singleton case, where both strides stay 0) resolve
 * to 'C'. */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
Py_ssize_t c_candidate = 0;
Py_ssize_t f_candidate = 0;
int dim;

/* Innermost (last) non-singleton dimension: C-order stride candidate. */
for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
if (__pyx_v_mslice->shape[dim] > 1) {
c_candidate = __pyx_v_mslice->strides[dim];
break;
}
}

/* Outermost (first) non-singleton dimension: F-order stride candidate. */
for (dim = 0; dim < __pyx_v_ndim; dim++) {
if (__pyx_v_mslice->shape[dim] > 1) {
f_candidate = __pyx_v_mslice->strides[dim];
break;
}
}

return (abs_py_ssize_t(c_candidate) <= abs_py_ssize_t(f_candidate)) ? 'C' : 'F';
}
/* "View.MemoryView":1123
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* Recursively copy an ndim-dimensional strided buffer from src to dst.
 * Iteration counts are taken from dst_shape (the caller has already
 * verified/broadcast the extents; src_extent is computed but unused).
 * 1-D base case: when both strides are positive and equal to itemsize
 * (i.e. the data is contiguous) a single memcpy moves the whole row,
 * otherwise items are copied one at a time.  For ndim > 1 the function
 * recurses over the leading dimension with the remaining shape/stride
 * arrays.  Generated by Cython from "View.MemoryView":1123. */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
  Py_ssize_t __pyx_v_dst_extent;
  Py_ssize_t __pyx_v_src_stride;
  Py_ssize_t __pyx_v_dst_stride;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  /* "View.MemoryView":1130-1133: read extent and stride of dimension 0 */
  __pyx_v_src_extent = (__pyx_v_src_shape[0]);
  __pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
  __pyx_v_src_stride = (__pyx_v_src_strides[0]);
  __pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
  /* "View.MemoryView":1135: if ndim == 1 (base case of the recursion) */
  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":1136-1137: short-circuit evaluation of
     *   src_stride > 0 and dst_stride > 0 and
     *   <size_t> src_stride == itemsize == <size_t> dst_stride */
    __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }
    __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L5_bool_binop_done;
    }
    /* chained comparison: src_stride == itemsize && itemsize == dst_stride */
    __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
    if (__pyx_t_2) {
      __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
    }
    __pyx_t_3 = (__pyx_t_2 != 0);
    __pyx_t_1 = __pyx_t_3;
    __pyx_L5_bool_binop_done:;
    if (__pyx_t_1) {
      /* "View.MemoryView":1138: contiguous row -> one bulk memcpy */
      memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent));
      goto __pyx_L4;
    }
    /*else*/ {
      /* "View.MemoryView":1140-1143: strided row -> copy item by item */
      __pyx_t_4 = __pyx_v_dst_extent;
      for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
        __pyx_v_i = __pyx_t_5;
        memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
      }
    }
    __pyx_L4:;
    goto __pyx_L3;
  }
  /*else*/ {
    /* "View.MemoryView":1145-1151: ndim > 1 -> recurse over dimension 0,
     * advancing the data pointers by this dimension's stride each step */
    __pyx_t_4 = __pyx_v_dst_extent;
    for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
      __pyx_v_i = __pyx_t_5;
      _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
      __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
      __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
    }
  }
  __pyx_L3:;
  /* function exit code */
}
/* "View.MemoryView":1153 copy_strided_to_strided
 * Public entry point: unpacks the data/strides/shape fields of the two
 * memoryview slices and delegates to the recursive strided copier above. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
  /* function exit code */
}
/* "View.MemoryView":1160 slice_get_size
 * Return the size of the memory occupied by the slice, in bytes:
 * itemsize multiplied by the product of the first ndim extents. */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  int __pyx_v_i;
  Py_ssize_t __pyx_v_size;
  Py_ssize_t __pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  /* "View.MemoryView":1163: start from the element size */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_size = __pyx_t_1;
  /* "View.MemoryView":1165-1166: multiply by each extent */
  __pyx_t_2 = __pyx_v_ndim;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;
    __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i]));
  }
  /* "View.MemoryView":1168: return size */
  __pyx_r = __pyx_v_size;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":1171 fill_contig_strides_array
 * Fill `strides` with the strides of a contiguous array of the given
 * `shape`, starting from an innermost stride of `stride` (normally the
 * itemsize).  order == 'F' lays the first axis out contiguously
 * (Fortran order, strides grow left to right); any other order value is
 * treated as C order (last axis contiguous, strides grow right to left).
 * Returns the total extent in bytes, i.e. stride * product(shape). */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int dim;
  if (__pyx_v_order == 'F') {
    /* Fortran order: accumulate the running stride from axis 0 upward. */
    for (dim = 0; dim < __pyx_v_ndim; dim++) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  } else {
    /* C order: accumulate the running stride from the last axis downward. */
    for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  }
  return __pyx_v_stride;
}
/* "View.MemoryView":1192 copy_data_to_temp
 * Allocate a temporary contiguous buffer (order 'C' or 'F'), describe it
 * in *tmpslice (shape copied from src, suboffsets cleared to -1, strides
 * filled for contiguity, broadcast axes of extent 1 given stride 0), and
 * copy src's data into it -- with one memcpy when src is already
 * contiguous in the requested order, otherwise via the strided copier.
 * Returns the malloc'd buffer (caller owns/frees it) or NULL after
 * raising MemoryError.  The error path acquires the GIL to record a
 * traceback, since this function normally runs without it. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  /* "View.MemoryView":1203-1204: compute total byte size of the slice */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
  /* "View.MemoryView":1206-1208: allocate, raising MemoryError on failure */
  __pyx_v_result = malloc(__pyx_v_size);
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 1208, __pyx_L1_error)
  }
  /* "View.MemoryView":1211-1215: describe the temp buffer in *tmpslice */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
  __pyx_v_tmpslice->memview = __pyx_v_src->memview, __pyx_t_4 = __pyx_v_tmpslice->memview; /* NOTE(review): see below -- original kept */
  /* function exit code continues below */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":1235 _err_extents
 * Always-raising helper: acquires the GIL ("with gil"), builds the tuple
 * (i, extent1, extent2), %-formats it into the "got differing extents in
 * dimension ..." message, raises ValueError, and returns -1 so callers
 * can propagate the error with `if (... == -1) goto error;`. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_extents", 0);
  /* "View.MemoryView":1238: build the 3-tuple (i, extent1, extent2) */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1238, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;
  /* "View.MemoryView":1237: %-format the message and raise ValueError */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
  __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_Raise(__pyx_t_3, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __PYX_ERR(1, 1237, __pyx_L1_error)
  /* function exit code: always reached via the error label */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1241 _err_dim
 * Always-raising helper: acquires the GIL, decodes the ASCII message
 * `msg`, %-formats the dimension index into it, calls `error(...)` to
 * build the exception (via Cython's fast-call dispatch), raises it, and
 * returns -1 for error propagation by the caller. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_error);
  /* "View.MemoryView":1242: msg.decode('ascii') % dim */
  __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1242, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* call error(<formatted message>): unwrap bound methods, then try the
   * PyFunction/PyCFunction fast-call paths before the generic call */
  __Pyx_INCREF(__pyx_v_error);
  __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_2)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  if (!__pyx_t_2) {
    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_GOTREF(__pyx_t_1);
  } else {
    #if CYTHON_FAST_PYCALL
    if (PyFunction_Check(__pyx_t_3)) {
      PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
      __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    } else
    #endif
    #if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
      PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4};
      __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    } else
    #endif
    {
      __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL;
      __Pyx_GIVEREF(__pyx_t_4);
      PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4);
      __pyx_t_4 = 0;
      __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_1);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    }
  }
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 1242, __pyx_L1_error)
  /* function exit code: always reached via the error label */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1245 _err
 * Always-raising helper: acquires the GIL and raises `error` -- called
 * with the ASCII-decoded `msg` when msg != NULL, or re-raised bare when
 * msg is NULL (e.g. _err(MemoryError, NULL) in copy_data_to_temp).
 * Returns -1 so callers can propagate the error. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err", 0);
  __Pyx_INCREF(__pyx_v_error);
  /* "View.MemoryView":1246: if msg != NULL */
  __pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":1247: raise error(msg.decode('ascii')) -- the
     * same unwrap-then-fast-call dispatch pattern as _err_dim above */
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1247, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_INCREF(__pyx_v_error);
    __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_4, function);
      }
    }
    if (!__pyx_t_5) {
      __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_GOTREF(__pyx_t_2);
    } else {
      #if CYTHON_FAST_PYCALL
      if (PyFunction_Check(__pyx_t_4)) {
        PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
        __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      } else
      #endif
      #if CYTHON_FAST_PYCCALL
      if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
        PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3};
        __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      } else
      #endif
      {
        __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_6);
        __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL;
        __Pyx_GIVEREF(__pyx_t_3);
        PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3);
        __pyx_t_3 = 0;
        __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      }
    }
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(1, 1247, __pyx_L1_error)
  }
  /* "View.MemoryView":1249: else: raise error (bare re-raise) */
  /*else*/ {
    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
    __PYX_ERR(1, 1249, __pyx_L1_error)
  }
  /* function exit code: always reached via the error label */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1252
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
/* "View.MemoryView":1260
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1261
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1263
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1264
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1265
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1268
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1269
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1268
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1270
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1271
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1270
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1273
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1275
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1276
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1277
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1278
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1279
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1277
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1281
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1281, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1276
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1283
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1284
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1284, __pyx_L1_error)
/* "View.MemoryView":1283
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1286
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1288
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1289
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1288
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1291
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 1291, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_6;
/* "View.MemoryView":1292
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1286
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1294
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1297
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1298
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1297
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1299
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1299
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1302
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1305
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim));
/* "View.MemoryView":1306
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1307
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1308
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1302
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1294
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1310
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_7 = (__pyx_t_2 != 0);
if (__pyx_t_7) {
/* "View.MemoryView":1313
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1313, __pyx_L1_error)
/* "View.MemoryView":1314
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1314, __pyx_L1_error)
/* "View.MemoryView":1310
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1316
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1317
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1318
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1320
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1321
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1252
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1324 — broadcast_leading()
 *
 * Grow `mslice` from `ndim` to `ndim_other` dimensions by shifting
 * its existing dimensions to the back and filling the new leading
 * positions with broadcast (extent-1, direct) dimensions. */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
    int __pyx_v_i;
    int __pyx_v_offset = __pyx_v_ndim_other - __pyx_v_ndim;

    /* Shift existing dims by `offset`, walking backwards so each
     * source entry is read before it can be overwritten. */
    for (__pyx_v_i = __pyx_v_ndim - 1; __pyx_v_i >= 0; __pyx_v_i--) {
        __pyx_v_mslice->shape[__pyx_v_i + __pyx_v_offset] = __pyx_v_mslice->shape[__pyx_v_i];
        __pyx_v_mslice->strides[__pyx_v_i + __pyx_v_offset] = __pyx_v_mslice->strides[__pyx_v_i];
        __pyx_v_mslice->suboffsets[__pyx_v_i + __pyx_v_offset] = __pyx_v_mslice->suboffsets[__pyx_v_i];
    }

    /* New leading dims: extent 1, stride copied from dim 0, and
     * suboffset -1 (i.e. a direct dimension). */
    for (__pyx_v_i = 0; __pyx_v_i < __pyx_v_offset; __pyx_v_i++) {
        __pyx_v_mslice->shape[__pyx_v_i] = 1;
        __pyx_v_mslice->strides[__pyx_v_i] = __pyx_v_mslice->strides[0];
        __pyx_v_mslice->suboffsets[__pyx_v_i] = -1;
    }
}
/* "View.MemoryView":1346 — refcount_copying()
 *
 * If the slice's element type is a Python object, walk `dst` and
 * INCREF (inc true) or DECREF (inc false) each element; for plain
 * data types this is a no-op.  Callable without the GIL (the helper
 * acquires it). */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
    if (__pyx_v_dtype_is_object) {
        __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape,
                                                            __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
    }
}
/* "View.MemoryView":1355 — refcount_objects_in_slice_with_gil()
 *
 * GIL-acquiring wrapper around refcount_objects_in_slice(), so that
 * nogil code can safely adjust object refcounts in a slice. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
    __Pyx_RefNannyDeclarations
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);

    __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);

    __Pyx_RefNannyFinishContext();
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
}
/* "View.MemoryView":1361 — refcount_objects_in_slice()
 *
 * Recursively visit every element of an object slice and INCREF
 * (inc true) or DECREF (inc false) the PyObject* stored there.
 * Outer dimensions recurse with the leading shape/stride stripped;
 * the 1-D base case touches the object pointers directly.  The GIL
 * must be held (see the _with_gil wrapper). */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
    CYTHON_UNUSED Py_ssize_t __pyx_v_i;
    Py_ssize_t __pyx_t_1;
    Py_ssize_t __pyx_t_2;
    __Pyx_RefNannyDeclarations
    __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);

    __pyx_t_1 = (__pyx_v_shape[0]);
    for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2++) {
        __pyx_v_i = __pyx_t_2;
        if (__pyx_v_ndim == 1) {
            if (__pyx_v_inc) {
                Py_INCREF((((PyObject **)__pyx_v_data)[0]));
            } else {
                Py_DECREF((((PyObject **)__pyx_v_data)[0]));
            }
        } else {
            __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1),
                                                       (__pyx_v_ndim - 1), __pyx_v_inc);
        }
        /* Advance to the next element/subarray along dimension 0. */
        __pyx_v_data += (__pyx_v_strides[0]);
    }

    __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1381 — slice_assign_scalar()
 *
 * Assign the `itemsize`-byte value at `item` to every element of
 * `dst`.  When the element type is a Python object, old references
 * are released before the overwrite and new references acquired
 * afterwards via refcount_copying(). */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
    __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
    __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides,
                                          __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
    __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
}
/* "View.MemoryView":1391 — _slice_assign_scalar()
 *
 * Recursive worker for slice_assign_scalar(): fill a strided slice
 * with copies of the `itemsize`-byte value at `item`.  The 1-D base
 * case memcpy's into each element position; higher dimensions
 * recurse with the leading shape/stride stripped. */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
    CYTHON_UNUSED Py_ssize_t __pyx_v_i;
    Py_ssize_t __pyx_v_stride = (__pyx_v_strides[0]);
    Py_ssize_t __pyx_v_extent = (__pyx_v_shape[0]);
    Py_ssize_t __pyx_t_1;

    if (__pyx_v_ndim == 1) {
        /* Innermost dimension: write the scalar into each element. */
        for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_v_extent; __pyx_t_1++) {
            __pyx_v_i = __pyx_t_1;
            memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize);
            __pyx_v_data += __pyx_v_stride;
        }
    } else {
        /* Outer dimension: recurse into each subarray. */
        for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_v_extent; __pyx_t_1++) {
            __pyx_v_i = __pyx_t_1;
            __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1),
                                                  (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
            __pyx_v_data += __pyx_v_stride;
        }
    }
}
/* Storage for the array extension type's C-level vtable instance. */
static struct __pyx_vtabstruct_array __pyx_vtable_array;
/* tp_new slot for the array type: allocate the instance (abstract
 * subtypes go through object.__new__ to raise properly), wire up the
 * C vtable, initialise the object fields to None, then run
 * __cinit__; on __cinit__ failure the half-built object is dropped. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
    PyObject *o;
    struct __pyx_array_obj *p;
    if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
        o = (*t->tp_alloc)(t, 0);
    } else {
        o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
    }
    if (unlikely(!o)) return 0;
    p = (struct __pyx_array_obj *)o;
    p->__pyx_vtab = __pyx_vtabptr_array;
    Py_INCREF(Py_None); p->mode = ((PyObject*)Py_None);
    Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None);
    if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
    return o;
bad:
    Py_DECREF(o); o = 0;
    return NULL;
}
/* tp_dealloc slot for the array type.  Runs tp_finalize (PEP 442) if
 * needed, calls __dealloc__ with the refcount temporarily bumped and
 * any live exception saved/restored, then clears fields and frees. */
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;  /* resurrected */
}
#endif
{
/* __dealloc__ may run arbitrary Python code: protect the current
 * exception state and keep the refcount non-zero during the call. */
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
/* sq_item slot: implement a[i] for integer i by boxing the index and
 * delegating to the type's mapping subscript handler. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
    PyObject *index = PyInt_FromSsize_t(i);
    PyObject *result;
    if (!index) return 0;
    result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
    Py_DECREF(index);
    return result;
}
/* mp_ass_subscript slot: a[i] = v delegates to __setitem__; item
 * deletion (v == NULL) is unsupported and raises NotImplementedError. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
    if (!v) {
        PyErr_Format(PyExc_NotImplementedError,
            "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
        return -1;
    }
    return __pyx_array___setitem__(o, i, v);
}
/* tp_getattro slot: try generic attribute lookup first; if that
 * raises AttributeError, clear it and fall back to __getattr__. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
    PyObject *attr = PyObject_GenericGetAttr(o, n);
    if (!attr && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Clear();
        attr = __pyx_array___getattr__(o, n);
    }
    return attr;
}
/* Getter for the array type's `memview` property. */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
/* Method table for array: __getattr__ only (METH_COEXIST lets it
 * take precedence over the slot wrapper). */
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{0, 0, 0, 0}
};
/* Property table for array: exposes the read-only `memview` getter. */
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
/* Sequence protocol for array: only sq_item is provided (a[i]),
 * which forwards to the mapping subscript. */
static PySequenceMethods __pyx_tp_as_sequence_array = {
0, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
/* Mapping protocol for array: __getitem__ and __setitem__. */
static PyMappingMethods __pyx_tp_as_mapping_array = {
0, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
/* Buffer protocol for array: only the new-style getbuffer is
 * implemented; legacy Python 2 buffer slots are left empty. */
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
/* Type object for the Cython utility `array` class, wiring together
 * the dealloc/new functions and the sequence/mapping/buffer tables
 * defined above.  Remaining slots are filled in by PyType_Ready. */
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"pairwise3.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
/* tp_new slot for the Enum helper type: allocate the instance and
 * initialise its single `name` field to None. */
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
    PyObject *o;
    struct __pyx_MemviewEnum_obj *p;
    if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
        o = (*t->tp_alloc)(t, 0);
    } else {
        o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
    }
    if (unlikely(!o)) return 0;
    p = (struct __pyx_MemviewEnum_obj *)o;
    Py_INCREF(Py_None);
    p->name = Py_None;
    return o;
}
/* tp_dealloc for Enum: honor PEP 442 finalizers (the object may be
   resurrected, in which case we must return without freeing), untrack from
   the GC before touching fields, drop the owned 'name' reference, then
   release the memory via the type's tp_free. */
static void __pyx_tp_dealloc_Enum(PyObject *o) {
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->name);
  (*Py_TYPE(o)->tp_free)(o);
}
/* GC traversal for Enum: report the single owned reference ('name') to the
 * collector, propagating a nonzero visitor result immediately. */
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc visit, void *arg) {
  struct __pyx_MemviewEnum_obj *self = (struct __pyx_MemviewEnum_obj *)o;
  if (self->name != NULL) {
    int rc = (*visit)(self->name, arg);
    if (rc != 0)
      return rc;
  }
  return 0;
}
/* GC clear for Enum: replace 'name' with an owned None before releasing the
 * old value, so the slot is never left dangling if the decref re-enters. */
static int __pyx_tp_clear_Enum(PyObject *o) {
  struct __pyx_MemviewEnum_obj *self = (struct __pyx_MemviewEnum_obj *)o;
  PyObject *old_name = ((PyObject *)self->name);
  Py_INCREF(Py_None);
  self->name = Py_None;
  Py_XDECREF(old_name);
  return 0;
}
/* Enum exposes no Python-visible methods; sentinel-terminated empty table. */
static PyMethodDef __pyx_methods_Enum[] = {
  {0, 0, 0, 0}
};
/* PyTypeObject for the internal "pairwise3.Enum" helper type used by the
   memoryview machinery to tag layout kinds (generic/strided/indirect/...).
   Participates in GC (HAVE_GC + traverse/clear); only __repr__ and
   __init__ are implemented.  Slot initializers are positional. */
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
  PyVarObject_HEAD_INIT(0, 0)
  "pairwise3.Enum", /*tp_name*/
  sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_Enum, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  __pyx_MemviewEnum___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_Enum, /*tp_traverse*/
  __pyx_tp_clear_Enum, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_Enum, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  __pyx_MemviewEnum___init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_Enum, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Storage for memoryview's C-method vtable (populated during module init). */
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
/* tp_new for memoryview: allocate (tp_alloc, or object.__new__ for abstract
   subclasses), install the vtable pointer, default the owned object slots to
   None, null out the Py_buffer's obj field, then run the generated
   __cinit__.  On __cinit__ failure the half-built instance is released. */
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryview_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryview_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_memoryview;
  p->obj = Py_None; Py_INCREF(Py_None);
  p->_size = Py_None; Py_INCREF(Py_None);
  p->_array_interface = Py_None; Py_INCREF(Py_None);
  p->view.obj = NULL;
  if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
  return o;
  bad:
  Py_DECREF(o); o = 0;
  return NULL;
}
/* tp_dealloc for memoryview.  After the PEP 442 finalizer check and GC
   untracking, the Python-level __dealloc__ is invoked with the refcount
   temporarily bumped (so code running inside it cannot re-trigger
   deallocation) and any pending exception saved/restored around the call.
   Finally the owned slots are dropped and the memory freed. */
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);   /* guard against recursive dealloc from __dealloc__ */
    __pyx_memoryview___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->obj);
  Py_CLEAR(p->_size);
  Py_CLEAR(p->_array_interface);
  (*Py_TYPE(o)->tp_free)(o);
}
/* GC traversal for memoryview: visit every owned PyObject reference — the
 * exported object, cached size, cached __array_interface__, and the
 * Py_buffer's owner — in that order, stopping at the first nonzero result. */
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc visit, void *arg) {
  struct __pyx_memoryview_obj *self = (struct __pyx_memoryview_obj *)o;
  PyObject *owned[4];
  owned[0] = self->obj;
  owned[1] = self->_size;
  owned[2] = self->_array_interface;
  owned[3] = self->view.obj;
  for (size_t k = 0; k < sizeof(owned) / sizeof(owned[0]); k++) {
    if (owned[k] != NULL) {
      int rc = (*visit)(owned[k], arg);
      if (rc != 0)
        return rc;
    }
  }
  return 0;
}
/* GC clear for memoryview: each owned slot is swapped to None via a
   temporary before the old value is released (the order — assign new, then
   decref old — keeps the slot valid if the decref re-enters).  The buffer
   owner is simply cleared to NULL. */
static int __pyx_tp_clear_memoryview(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  tmp = ((PyObject*)p->obj);
  p->obj = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_size);
  p->_size = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_array_interface);
  p->_array_interface = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  Py_CLEAR(p->view.obj);
  return 0;
}
/* sq_item adapter: implement integer indexing by boxing the index and
 * delegating to the type's mapping-protocol subscript.  Returns NULL (with
 * an exception set) if the index cannot be boxed. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
  PyObject *boxed = PyInt_FromSsize_t(i);
  PyObject *result = NULL;
  if (boxed != NULL) {
    result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, boxed);
    Py_DECREF(boxed);
  }
  return result;
}
/* mp_ass_subscript adapter: forward assignment to the generated
 * __setitem__; deletion (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
  if (v == NULL) {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
  return __pyx_memoryview___setitem__(o, i, v);
}
/* Property getters for the memoryview type (T, base, shape, strides,
   suboffsets, ndim, itemsize, nbytes, size): thin getset-table wrappers
   that delegate straight to the generated __get__ implementations. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
/* Method table for memoryview: contiguity queries and copy helpers.
   All take no arguments (METH_NOARGS); no docstrings are attached. */
static PyMethodDef __pyx_methods_memoryview[] = {
  {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
  {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
  {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
  {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
  {0, 0, 0, 0}
};
/* Getset table for memoryview: read-only properties backed by the
   __pyx_getprop_* wrappers above (no setters, no docs, no closures). */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
  {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
  {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
  {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
  {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
  {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
  {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
  {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
  {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
  {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};
/* Sequence protocol for memoryview: length plus item access (the item slot
   funnels into the mapping subscript via __pyx_sq_item_memoryview). */
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
  __pyx_memoryview___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_memoryview, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
/* Mapping protocol for memoryview: len(), subscript get, subscript set. */
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
  __pyx_memoryview___len__, /*mp_length*/
  __pyx_memoryview___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
/* Buffer protocol for memoryview: only the new-style getbuffer hook is
   needed; the legacy Py2 slots are compiled in but unused. */
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};
/* PyTypeObject for the internal "pairwise3.memoryview" type (Cython's typed
   memoryview).  GC-enabled; repr/str, sequence/mapping/buffer protocols and
   the method/getset tables above are wired into the positional slots. */
static PyTypeObject __pyx_type___pyx_memoryview = {
  PyVarObject_HEAD_INIT(0, 0)
  "pairwise3.memoryview", /*tp_name*/
  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  __pyx_memoryview___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  __pyx_memoryview___str__, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_memoryview, /*tp_traverse*/
  __pyx_tp_clear_memoryview, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_memoryview, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_memoryview, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_memoryview, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Vtable storage for the _memoryviewslice subtype (filled at module init). */
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
/* tp_new for _memoryviewslice: build the base memoryview first, then
   override the vtable with the subtype's, default from_object to None and
   null the embedded slice's memview pointer. */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryviewslice_obj *p;
  PyObject *o = __pyx_tp_new_memoryview(t, a, k);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryviewslice_obj *)o);
  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
  p->from_object = Py_None; Py_INCREF(Py_None);
  p->from_slice.memview = NULL;
  return o;
}
/* tp_dealloc for _memoryviewslice: finalizer check, untrack, run the
   subtype's __dealloc__ with the refcount bumped and exception state saved,
   drop from_object, then RE-track before delegating to the base memoryview
   dealloc (which expects a tracked object and performs the final free). */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);   /* keep object alive while __dealloc__ runs */
    __pyx_memoryviewslice___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->from_object);
  PyObject_GC_Track(o);
  __pyx_tp_dealloc_memoryview(o);
}
/* GC traversal for _memoryviewslice: first delegate to the base memoryview
 * traversal, then additionally visit the owned from_object reference. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc visit, void *arg) {
  struct __pyx_memoryviewslice_obj *self = (struct __pyx_memoryviewslice_obj *)o;
  int rc = __pyx_tp_traverse_memoryview(o, visit, arg);
  if (rc != 0)
    return rc;
  if (self->from_object != NULL) {
    rc = (*visit)(self->from_object, arg);
    if (rc != 0)
      return rc;
  }
  return 0;
}
/* GC clear for _memoryviewslice: clear base-class slots first, then swap
   from_object to None (assign-before-decref via tmp), and release the
   acquisition count held on the embedded from_slice. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object);
  p->from_object = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
  return 0;
}
/* 'base' property getter for _memoryviewslice: delegates to the generated
   __get__ implementation. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
/* No extra Python-visible methods beyond the base memoryview's. */
static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {0, 0, 0, 0}
};
/* Getset table: overrides 'base' to report the originating object. */
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};
/* PyTypeObject for "pairwise3._memoryviewslice", the GC-enabled subtype of
   memoryview used to hand slices back to Python.  tp_base is left 0 here
   and is presumably wired to the memoryview type during module init — the
   repr/str slots are only filled on PyPy, which does not inherit them. */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  "pairwise3._memoryviewslice", /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
  #endif
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___str__, /*tp_str*/
  #else
  0, /*tp_str*/
  #endif
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  "Internal class for passing memoryview slices to Python", /*tp_doc*/
  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods__memoryviewslice, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets__memoryviewslice, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new__memoryviewslice, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Module-level method table: the pairwise3 function itself is added at init
   time as a CyFunction, so this static table is empty. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};
/* Py3 module definition.  m_size = -1: module state lives in globals, so
   the module does not support multiple sub-interpreters / reinit. */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
  #if PY_VERSION_HEX < 0x03020000
  { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
  #else
  PyModuleDef_HEAD_INIT,
  #endif
  "pairwise3",
  0, /* m_doc */
  -1, /* m_size */
  __pyx_methods /* m_methods */,
  NULL, /* m_reload */
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif
/* Interned-string table: every literal the module uses (identifiers,
   error-message fragments, attribute names).  Each row gives the target
   global, the character data and its size, followed by flag fields
   (presumably encoding / is_unicode / is_str / intern — TODO confirm
   against __Pyx_StringTabEntry); materialized by __Pyx_InitStrings. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
  {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
  {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
  {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1},
  {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
  {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
  {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
  {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
  {&__pyx_n_s_M, __pyx_k_M, sizeof(__pyx_k_M), 0, 0, 1, 1},
  {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
  {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
  {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
  {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1},
  {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
  {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
  {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
  {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
  {&__pyx_kp_s_Users_ethen_machine_learning_py, __pyx_k_Users_ethen_machine_learning_py, sizeof(__pyx_k_Users_ethen_machine_learning_py), 0, 0, 1, 0},
  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
  {&__pyx_n_s_X, __pyx_k_X, sizeof(__pyx_k_X), 0, 0, 1, 1},
  {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
  {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
  {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
  {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
  {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
  {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_dist, __pyx_k_dist, sizeof(__pyx_k_dist), 0, 0, 1, 1},
  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
  {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
  {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
  {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
  {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
  {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
  {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1},
  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
  {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
  {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
  {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
  {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
  {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
  {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
  {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
  {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
  {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
  {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
  {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
  {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
  {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
  {&__pyx_n_s_pairwise3, __pyx_k_pairwise3, sizeof(__pyx_k_pairwise3), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
  {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
  {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
  {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
  {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
  {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
  {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
  {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
  {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
  {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};
/* Look up and cache the builtins the module uses (range, ValueError, ...).
   The __PYX_ERR line references point back into the originating .pyx /
   View.MemoryView sources for error reporting.  Returns 0 on success,
   -1 with an exception set on failure. */
static int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 24, __pyx_L1_error)
  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 131, __pyx_L1_error)
  __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error)
  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error)
  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 396, __pyx_L1_error)
  __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 425, __pyx_L1_error)
  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 599, __pyx_L1_error)
  __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 818, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* Build and cache the constant objects the module needs at runtime:
   argument tuples for the memoryview error messages, slice(None) singletons,
   the pairwise3 code object, and the layout-name tuples for the Enum
   instances.  Each interleaved comment quotes the originating Cython source
   line.  Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_InitCachedConstants(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
  /* "View.MemoryView":131
 *
 *         if not self.ndim:
 *             raise ValueError("Empty shape tuple for cython.array")             # <<<<<<<<<<<<<<
 *
 *         if itemsize <= 0:
 */
  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 131, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple_);
  __Pyx_GIVEREF(__pyx_tuple_);
  /* "View.MemoryView":134
 *
 *         if itemsize <= 0:
 *             raise ValueError("itemsize <= 0 for cython.array")             # <<<<<<<<<<<<<<
 *
 *         if not isinstance(format, bytes):
 */
  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 134, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__2);
  __Pyx_GIVEREF(__pyx_tuple__2);
  /* "View.MemoryView":137
 *
 *         if not isinstance(format, bytes):
 *             format = format.encode('ASCII')             # <<<<<<<<<<<<<<
 *         self._format = format  # keep a reference to the byte string
 *         self.format = self._format
 */
  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 137, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__3);
  __Pyx_GIVEREF(__pyx_tuple__3);
  /* "View.MemoryView":146
 *
 *         if not self._shape:
 *             raise MemoryError("unable to allocate shape and strides.")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 146, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__4);
  __Pyx_GIVEREF(__pyx_tuple__4);
  /* "View.MemoryView":174
 *             self.data = <char *>malloc(self.len)
 *             if not self.data:
 *                 raise MemoryError("unable to allocate array data.")             # <<<<<<<<<<<<<<
 *
 *             if self.dtype_is_object:
 */
  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 174, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__5);
  __Pyx_GIVEREF(__pyx_tuple__5);
  /* "View.MemoryView":190
 *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *         if not (flags & bufmode):
 *             raise ValueError("Can only create a buffer that is contiguous in memory.")             # <<<<<<<<<<<<<<
 *         info.buf = self.data
 *         info.len = self.len
 */
  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 190, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__6);
  __Pyx_GIVEREF(__pyx_tuple__6);
  /* "View.MemoryView":484
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:
 *             raise ValueError("Unable to convert item to object")             # <<<<<<<<<<<<<<
 *         else:
 *             if len(self.view.format) == 1:
 */
  __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 484, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__7);
  __Pyx_GIVEREF(__pyx_tuple__7);
  /* "View.MemoryView":556
 *         if self.view.strides == NULL:
 *
 *             raise ValueError("Buffer view does not expose strides")             # <<<<<<<<<<<<<<
 *
 *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 */
  __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 556, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__8);
  __Pyx_GIVEREF(__pyx_tuple__8);
  /* "View.MemoryView":563
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:
 *             return (-1,) * self.view.ndim             # <<<<<<<<<<<<<<
 *
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
  __pyx_tuple__9 = PyTuple_New(1); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 563, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__9);
  __Pyx_INCREF(__pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_int_neg_1);
  PyTuple_SET_ITEM(__pyx_tuple__9, 0, __pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_tuple__9);
  /* "View.MemoryView":668
 *         if item is Ellipsis:
 *             if not seen_ellipsis:
 *                 result.extend([slice(None)] * (ndim - len(tup) + 1))             # <<<<<<<<<<<<<<
 *                 seen_ellipsis = True
 *             else:
 */
  __pyx_slice__10 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__10)) __PYX_ERR(1, 668, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_slice__10);
  __Pyx_GIVEREF(__pyx_slice__10);
  /* "View.MemoryView":671
 *                 seen_ellipsis = True
 *             else:
 *                 result.append(slice(None))             # <<<<<<<<<<<<<<
 *                 have_slices = True
 *         else:
 */
  __pyx_slice__11 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__11)) __PYX_ERR(1, 671, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_slice__11);
  __Pyx_GIVEREF(__pyx_slice__11);
  /* "View.MemoryView":682
 *         nslices = ndim - len(result)
 *         if nslices:
 *             result.extend([slice(None)] * nslices)             # <<<<<<<<<<<<<<
 *
 *         return have_slices or nslices, tuple(result)
 */
  __pyx_slice__12 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__12)) __PYX_ERR(1, 682, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_slice__12);
  __Pyx_GIVEREF(__pyx_slice__12);
  /* "View.MemoryView":689
 *     for suboffset in suboffsets[:ndim]:
 *         if suboffset >= 0:
 *             raise ValueError("Indirect dimensions not supported")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 689, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__13);
  __Pyx_GIVEREF(__pyx_tuple__13);
  /* "pairwise3.pyx":30
 *     return sqrt(d)
 *
 * def pairwise3(double[:, :] X):             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
  __pyx_tuple__14 = PyTuple_Pack(8, __pyx_n_s_X, __pyx_n_s_X, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_dist, __pyx_n_s_M, __pyx_n_s_N, __pyx_n_s_D); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(0, 30, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__14);
  __Pyx_GIVEREF(__pyx_tuple__14);
  /* code object for the pairwise3() Python wrapper (1 arg, 8 locals) */
  __pyx_codeobj__15 = (PyObject*)__Pyx_PyCode_New(1, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__14, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_ethen_machine_learning_py, __pyx_n_s_pairwise3, 30, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__15)) __PYX_ERR(0, 30, __pyx_L1_error)
  /* "View.MemoryView":282
 *         return self.name
 *
 * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 282, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__16);
  __Pyx_GIVEREF(__pyx_tuple__16);
  /* "View.MemoryView":283
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 *
 */
  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 283, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__17);
  __Pyx_GIVEREF(__pyx_tuple__17);
  /* "View.MemoryView":284
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 284, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__18);
  __Pyx_GIVEREF(__pyx_tuple__18);
  /* "View.MemoryView":287
 *
 *
 * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 *
 */
  __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 287, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__19);
  __Pyx_GIVEREF(__pyx_tuple__19);
  /* "View.MemoryView":288
 *
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 288, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__20);
  __Pyx_GIVEREF(__pyx_tuple__20);
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}
/* One-time global setup: initialize the GIL/threading machinery, intern the
   string table, and cache the small integer constants 0, 1 and -1 used by
   the module.  Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_InitGlobals(void) {
  /* InitThreads.init */
  #ifdef WITH_THREAD
  PyEval_InitThreads();
  #endif
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* Module initialization entry point for the generated "pairwise3"
 * extension module: initpairwise3 on Python 2, PyInit_pairwise3 on
 * Python 3.  Creates the module object, initializes cached constants
 * and builtins, readies the memoryview helper types, then executes the
 * module-level statements from pairwise3.pyx (importing numpy, binding
 * the pairwise3 function, setting __test__, and creating the
 * View.MemoryView Enum singletons and thread locks).  Returns the
 * module object on Python 3 (NULL on error); returns nothing on
 * Python 2.  Auto-generated by Cython — edit the .pyx, not this file. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initpairwise3(void); /*proto*/
PyMODINIT_FUNC initpairwise3(void)
#else
PyMODINIT_FUNC PyInit_pairwise3(void); /*proto*/
PyMODINIT_FUNC PyInit_pairwise3(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
static PyThread_type_lock __pyx_t_2[8];
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
/* refnanny is a debugging aid; try the short name first, then the
 * fully-qualified module. */
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_pairwise3(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("pairwise3", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_pairwise3) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "pairwise3")) {
if (unlikely(PyDict_SetItemString(modules, "pairwise3", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/* wire up the C-level vtables for the memoryview helper types before
 * calling PyType_Ready on each */
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error)
__pyx_type___pyx_array.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 275, __pyx_L1_error)
__pyx_type___pyx_MemviewEnum.tp_print = 0;
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error)
__pyx_type___pyx_memoryview.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
/* _memoryviewslice inherits memoryview's vtable, overriding the two
 * item-conversion slots */
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error)
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
/*--- Type import code ---*/
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "pairwise3.pyx":5
 *
 * cimport cython
 * import numpy as np             # <<<<<<<<<<<<<<
 * from libc.math cimport sqrt
 * from cython.parallel import parallel, prange
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "pairwise3.pyx":30
 *     return sqrt(d)
 *
 * def pairwise3(double[:, :] X):             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_9pairwise3_1pairwise3, NULL, __pyx_n_s_pairwise3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pairwise3, __pyx_t_1) < 0) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "pairwise3.pyx":1
 * # cython: boundscheck = False             # <<<<<<<<<<<<<<
 * # cython: wraparound = False
 *
 */
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":207
 *         info.obj = self
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
 *
 *     def __dealloc__(array self):
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 207, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 207, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":282
 *         return self.name
 *
 * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 282, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":283
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 283, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":284
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 284, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
 *
 *
 * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
 *
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":312
 *
 * DEF THREAD_LOCKS_PREALLOCATED = 8
 * cdef int __pyx_memoryview_thread_locks_used = 0             # <<<<<<<<<<<<<<
 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
 *     PyThread_allocate_lock(),
 */
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":313
 * DEF THREAD_LOCKS_PREALLOCATED = 8
 * cdef int __pyx_memoryview_thread_locks_used = 0
 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [             # <<<<<<<<<<<<<<
 *     PyThread_allocate_lock(),
 *     PyThread_allocate_lock(),
 */
/* preallocate the 8 thread locks used for memoryview acquisition
 * counting */
__pyx_t_2[0] = PyThread_allocate_lock();
__pyx_t_2[1] = PyThread_allocate_lock();
__pyx_t_2[2] = PyThread_allocate_lock();
__pyx_t_2[3] = PyThread_allocate_lock();
__pyx_t_2[4] = PyThread_allocate_lock();
__pyx_t_2[5] = PyThread_allocate_lock();
__pyx_t_2[6] = PyThread_allocate_lock();
__pyx_t_2[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":535
 *         info.obj = self
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 535, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 535, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":981
 *         return self.from_object
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "View.MemoryView":1391
 *
 * @cname('__pyx_memoryview__slice_assign_scalar')
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
 *                               Py_ssize_t *strides, int ndim,
 *                               size_t itemsize, void *item) nogil:
 */
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init pairwise3", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init pairwise3");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Import `modname` and fetch its "RefNannyAPI" attribute, a PyLong
 * wrapping a pointer to the refnanny API table.  Returns NULL (with a
 * Python error set) if the module or attribute is unavailable. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    void *api = NULL;
    PyObject *module = PyImport_ImportModule((char *)modname);
    if (module) {
        PyObject *capi = PyObject_GetAttrString(module, (char *)"RefNannyAPI");
        if (capi) {
            api = PyLong_AsVoidPtr(capi);
            Py_DECREF(capi);
        }
        Py_DECREF(module);
    }
    return (__Pyx_RefNannyAPIStruct *)api;
}
#endif
/* GetBuiltinName */
/* Look up `name` on the builtins module (__pyx_b).  On failure, raise
 * NameError naming the missing identifier; returns the attribute or
 * NULL with the error set. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* GetModuleGlobalName */
/* Resolve `name` in the module globals (__pyx_d), falling back to the
 * builtins when absent.  NOTE: the two preprocessor branches each open
 * an if/else whose closing brace is shared below the #endif, so this
 * only reads correctly after preprocessing. */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
/* PyDict_GetItem returns a borrowed reference, hence the INCREF */
result = PyDict_GetItem(__pyx_d, name);
if (likely(result)) {
Py_INCREF(result);
} else {
#else
result = PyObject_GetItem(__pyx_d, name);
if (!result) {
PyErr_Clear();
#endif
result = __Pyx_GetBuiltinName(name);
}
return result;
}
/* PyObjectCall */
/* CPython-only fast path for calling `func` with positional tuple
 * `arg` and keyword dict `kw`: dispatch straight through tp_call,
 * guarded by the recursion limit.  Falls back to PyObject_Call when
 * tp_call is NULL. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
/* a NULL result must always be accompanied by an exception; turn a
 * violation of that invariant into an explicit SystemError */
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* BufferFormatCheck */
/* Return non-zero when the host stores integers little-endian: write 1
 * into an unsigned int and inspect the byte at the lowest address. */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    const unsigned int probe = 1U;
    const unsigned char *lowest_byte = (const unsigned char *)&probe;
    return *lowest_byte != 0;
}
/* Initialize a buffer-format parsing context: `stack` holds the field
 * nesting state, `type` is the expected root dtype, and all packing /
 * chunk-accumulation state is reset to defaults ('@' native packing,
 * empty pending chunk).  Leading 'S'-group wrappers are unwrapped so
 * parsing starts at their first field. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
/* descend through 'S'-group wrappers to their first field */
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
/* Parse a non-negative decimal integer at *ts, advancing *ts past the
 * digits consumed.  Returns the parsed value, or -1 when *ts does not
 * start with a digit (*ts is then left unchanged).  No overflow check:
 * format-string counts are expected to be small. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
/* BUG FIX: this loop previously tested *t < '9', which excluded the
 * digit '9' in any non-leading position -- "19" parsed as 1 with ts
 * left pointing at the '9'.  '9' is a valid digit: use <= '9'. */
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
/* Like __Pyx_BufFmt_ParseNumber, but a missing number is an error:
 * raises ValueError naming the offending character and returns -1. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    const int parsed = __Pyx_BufFmt_ParseNumber(ts);
    if (parsed == -1) {
        PyErr_Format(PyExc_ValueError,
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    }
    return parsed;
}
/* Raise ValueError for a format-string character the parser cannot
 * classify. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
/* Map a struct-module format character to a human-readable type name
 * for error messages; is_complex selects the "complex" variant of the
 * floating-point codes.  Unknown characters get a generic label. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    if (ch == 'c') return "'char'";
    if (ch == 'b') return "'signed char'";
    if (ch == 'B') return "'unsigned char'";
    if (ch == 'h') return "'short'";
    if (ch == 'H') return "'unsigned short'";
    if (ch == 'i') return "'int'";
    if (ch == 'I') return "'unsigned int'";
    if (ch == 'l') return "'long'";
    if (ch == 'L') return "'unsigned long'";
    if (ch == 'q') return "'long long'";
    if (ch == 'Q') return "'unsigned long long'";
    if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
    if (ch == 'T') return "a struct";
    if (ch == 'O') return "Python object";
    if (ch == 'P') return "a pointer";
    if (ch == 's' || ch == 'p') return "a string";
    if (ch == 0) return "end";
    return "unparseable format string";
}
/* Size in bytes of one element for a format character under standard
 * packing ('=', '<', '>', '!'), matching the struct module's fixed
 * sizes; complex variants are twice the scalar size.  Returns 0 with a
 * Python error set for 'g' (no standard size exists) and for unknown
 * characters. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* Size in bytes of one element for a format character under native
 * packing ('@', '^'), i.e. the compiler's sizeof; complex variants are
 * twice the scalar size.  Returns 0 with a Python error set for
 * unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
/* Alignment probes: sizeof(struct { char c; T x; }) - sizeof(T) yields
 * the compiler's alignment requirement for T (the padding inserted
 * after the leading char). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement for a format character, computed from
 * the __Pyx_st_* probe structs above.  Returns 0 with a Python error
 * set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably the same as above,
but we don't have any guarantees.
*/
/* Trailing-padding probes: sizeof(struct { T x; char c; }) - sizeof(T)
 * gives the padding a struct ending in T needs for self-alignment. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* End-of-struct padding contributed by a member of the given format
 * character, computed from the __Pyx_pad_* probe structs.  Returns 0
 * with a Python error set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* Classify a format character into a type-group code used for dtype
 * matching: 'H' plain char, 'I' signed integers and strings, 'U'
 * unsigned integers, 'R' real floats, 'C' complex floats, 'O' object,
 * 'P' pointer.  Returns 0 with a Python error set for unknown
 * characters. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
/* Raise a descriptive ValueError for a dtype mismatch detected while
 * checking a buffer format string: at the root (or past the end) it
 * reports expected-vs-got type names; inside a struct it also names
 * the parent struct and the field being matched. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
/* format string continued past the expected end */
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
/* Flush the pending "chunk" (ctx->enc_type repeated ctx->enc_count
 * times) accumulated by __Pyx_BufFmt_CheckString, matching it against
 * the expected __Pyx_TypeInfo tree: validates array dimensions,
 * applies native alignment for '@' packing, checks each field's size
 * and type group, advances fmt_offset, and walks to the next expected
 * field, descending into nested structs.  Returns 0 on success, -1
 * with a Python error set on mismatch.  A no-op when no chunk is
 * pending (enc_type == 0). */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
/* expected field is an array: check dimensionality, then treat the
 * whole array as one element of size arraysize * itemsize */
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
/* consume the chunk one expected field at a time */
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
/* native packing: round fmt_offset up to the type's alignment */
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
/* expected a complex struct: descend into its fields */
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
/* advance to the next expected field, popping finished structs and
 * pushing into 'S'-group subfields */
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
/* Parse a "(d1,d2,...)" array-dimension spec in a buffer format
 * string (called with *tsp on the '('), validating each dimension
 * against the expected type's arraysize.  On success sets
 * ctx->is_valid_array, advances *tsp past the ')', and returns Py_None
 * (used purely as a success flag); returns NULL with a Python error
 * set on failure. */
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
/* flush any pending chunk before the array spec */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
/* NOTE(review): `continue` re-tests the while condition without
 * advancing ts, so whitespace inside the parentheses would loop
 * forever.  This matches upstream Cython-generated code -- confirm
 * against a newer Cython before changing. */
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue;
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
/* Validate a Py_buffer format string `ts` against the expected dtype
 * described by `ctx` (set up by __Pyx_BufFmt_Init).  Scans the string,
 * accumulating runs of identical type characters and flushing them via
 * __Pyx_BufFmt_ProcessTypeChunk.  Handles byte-order/packing prefixes,
 * 'T{...}' nested structs, 'x' padding, 'Z' complex prefixes, ':name:'
 * annotations, '(dims)' subarrays, and repeat counts.  Returns the
 * position after the consumed portion, or NULL with a Python error
 * set on mismatch. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
/* end of string: flush and require that all expected fields were
 * consumed */
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
/* nested struct: recurse over the '{...}' body once per repeat
 * count */
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
/* end of nested struct: flush and apply trailing struct padding */
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
/* pad bytes advance the offset without matching a field */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
/* deliberate fallthrough: 'Z' is a prefix for the float codes */
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
/* same type as the pending chunk: just extend its count */
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
/* deliberate fallthrough: flush the old chunk and start a new one */
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
/* skip ':name:' field annotations */
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
/* a bare number is the repeat count for the next type character */
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* Put a Py_buffer into a safe "empty" state: NULL buf/obj and the
 * shared all-zero shape/stride and all-minus-one suboffset arrays. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
/* Acquire a buffer from `obj` and validate it against the expected
 * dtype: number of dimensions `nd`, the format string (skipped when
 * `cast` is set), and the item size.  On success fills `buf` and
 * returns 0; on failure zeroes `buf` and returns -1 with a Python
 * error set.  None/NULL objects yield an empty buffer and success. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
if (obj == Py_None || obj == NULL) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
/* normalize a missing suboffsets array to the shared minus-ones;
 * __Pyx_SafeReleaseBuffer undoes this before releasing */
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
/* Release a buffer previously filled by __Pyx_GetBufferAndValidate,
 * first undoing its suboffsets normalization; a no-op for empty
 * (zeroed) buffers. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (info->buf == NULL) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
/* MemviewSliceInit */
/* Fill `memviewslice` (shape, strides, suboffsets, data pointer) from
 * the Py_buffer owned by `memview`.  When the buffer carries no stride
 * information, strides are synthesized assuming a C-contiguous layout.
 * Registers the slice in the memoryview's acquisition count, taking a
 * Python reference on the first acquisition unless the memoryview is a
 * new reference.  Returns 0 on success, -1 with a Python error set. */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (!buf) {
PyErr_SetString(PyExc_ValueError,
"buf is NULL.");
goto fail;
} else if (memviewslice->memview || memviewslice->data) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
/* no stride info: derive C-contiguous strides from the shape,
 * innermost dimension first */
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* printf-style wrapper around Py_FatalError.  Py_FatalError aborts the
 * process and never returns, so the trailing va_end is unreachable. */
static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
Py_FatalError(msg);
va_end(vargs);
}
/* Post-increment *acquisition_count under `lock` and return the value it
 * held before the increment (0 means this was the first acquisition).
 * Fallback for platforms without native atomics. */
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int previous;
    PyThread_acquire_lock(lock, 1);  /* 1 = wait until acquired */
    previous = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return previous;
}
/* Post-decrement *acquisition_count under `lock` and return the value it
 * held before the decrement (1 means this released the last acquisition).
 * Fallback for platforms without native atomics. */
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int previous;
    PyThread_acquire_lock(lock, 1);  /* 1 = wait until acquired */
    previous = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return previous;
}
/* Register one more acquisition of the memoryview behind `memslice`.
 * No-op for empty/None slices.  Only the very first acquisition takes a
 * real Py_INCREF on the memoryview object; that INCREF needs the GIL, so
 * it is grabbed via PyGILState_Ensure when have_gil is false.
 * A negative pre-existing count indicates refcounting corruption and is
 * fatal (lineno identifies the generated-code call site). */
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    int first_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview || (PyObject *) memview == Py_None)
        return;
    if (__pyx_get_slice_count(memview) < 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (first_time) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
/* Release one acquisition of the memoryview behind `memslice` and clear
 * the slice's data pointer.  NULL memview is a no-op; a None memview is
 * just cleared.  Only when the last acquisition is released is the real
 * Py_CLEAR (DECREF) performed, taking the GIL if the caller does not
 * hold it.  A non-positive count before the decrement is fatal
 * (refcounting bug in generated code; lineno locates the call site). */
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        /* Others still hold acquisitions: just detach this slice. */
        memslice->memview = NULL;
    }
}
/* RaiseArgTupleInvalid */
/* Raise a TypeError describing a wrong number of positional arguments:
 * "<func>() takes (exactly|at least|at most) N positional argument(s)
 * (M given)".  `exact` forces "exactly"; otherwise the message picks
 * "at least"/"at most" depending on whether too few or too many were
 * supplied. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    /* %.1s prints the optional plural "s" (or nothing). */
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
/* Raise a TypeError for a keyword argument that was also passed
 * positionally (or given twice).  On Python 3 the keyword name is a
 * unicode object (%U); on Python 2 it is converted to a C string. */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
        #if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
        #else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
        #endif
}
/* ParseKeywords */
/* Distribute the entries of a **kwargs dict `kwds` into the `values`
 * slots that correspond to the declared argument names in `argnames`
 * (NULL-terminated array of interned-name pointers).
 *
 * Names at index < num_pos_args were already filled positionally; a
 * keyword matching one of those raises "multiple values".  Unknown
 * keywords go into `kwds2` when the function takes **kwargs, otherwise
 * raise "unexpected keyword argument".  Non-string keys raise
 * "keywords must be strings".  Returns 0 on success, -1 with an
 * exception set on error.
 *
 * Matching is two-pass per key: first a fast identity scan (interned
 * strings usually compare equal by pointer), then a value comparison
 * guarded by a cheap length check. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        /* Pass 1: pointer-identity match against keyword-only slots. */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        name = first_kw_arg;
        #if PY_MAJOR_VERSION < 3
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            /* Pass 2 (Py2 str): compare by size then content. */
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* Not a keyword-only arg: check positionally-filled names
                 * to distinguish "given twice" from "unexpected". */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                             && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
        #endif
        if (likely(PyUnicode_Check(key))) {
            /* Pass 2 (unicode): identity, then length, then full compare.
             * PyUnicode_Compare can fail; cmp < 0 with a live exception
             * means error, not "less than". */
            while (*name) {
                int cmp = (**name == key) ? 0 :
                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
                #endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
                    #endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
    #if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
    #else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
    #endif
bad:
    return -1;
}
/* ArgTypeTest */
/* Raise a TypeError reporting that argument `name` had the wrong type,
 * naming both the expected type and the actual type of `obj`. */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
}
/* Check that argument `obj` is of `type`.  Returns 1 on success, 0 with
 * an exception set on failure.
 *
 * none_allowed: accept Py_None as a valid value.
 * exact: require the exact type (no subclasses); on Python 2 the
 *        basestring pseudo-type additionally accepts str and unicode
 *        exactly. */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
                 const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None) return 1;
    else if (exact) {
        if (likely(Py_TYPE(obj) == type)) return 1;
        #if PY_MAJOR_VERSION == 2
        else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
        #endif
    }
    else {
        /* Subclass instances pass the non-exact test. */
        if (likely(PyObject_TypeCheck(obj, type))) return 1;
    }
    __Pyx_RaiseArgumentTypeInvalid(name, obj, type);
    return 0;
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
/* Fast-path PyErr_Restore: install (type, value, tb) — ownership of the
 * three references is stolen — directly into the given thread state,
 * then drop the previously-set exception.  The old values are stashed
 * first so the DECREFs cannot run destructors while the state is
 * half-updated. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* Fast-path PyErr_Fetch: move the current exception (ownership included)
 * out of the thread state into *type/*value/*tb and clear the state. */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of the `raise` statement.
 * Borrows type/value/tb from the caller; takes its own references and
 * releases them on the error path.  Normalizes "raise Class" vs
 * "raise instance" and sets the thread-state exception via ErrRestore
 * (which steals the references on success).  `cause` is unused on Py2. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
/* raising a class: normalize into an instance */
#if CYTHON_COMPILING_IN_PYPY
        /* PyPy's NormalizeException requires a non-NULL value. */
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* raising an instance: no separate value allowed */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    __Pyx_ErrRestore(type, value, tb);  /* steals all three references */
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Python 3 implementation of `raise type, value, tb from cause`.
 * type/value/tb/cause are borrowed; the only reference this function
 * owns is `owned_instance` (an instance it may construct from a class),
 * released at `bad`. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* raise instance: value must be absent; use the instance's class */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        /* raise Class(value): if value is already an instance of (a
         * subclass of) Class, use it directly; otherwise instantiate. */
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            /* Call the class with value as args (tuple passed through,
             * scalar wrapped, NULL -> no args). */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
#if PY_VERSION_HEX >= 0x03030000
    /* On 3.3+, `raise ... from None` must still clear __suppress_context__,
     * so a Py_None cause is handled below rather than skipped here. */
    if (cause) {
#else
    if (cause && cause != Py_None) {
#endif
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);  /* steals fixed_cause */
    }
    PyErr_SetObject(type, value);
    if (tb) {
/* attach the explicit traceback to the freshly-set exception */
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        /* CPython: swap the traceback directly in the thread state. */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* BytesEquals */
/* Compare two objects for (in)equality with a bytes fast path.
 * `equals` is Py_EQ or Py_NE; returns 1/0 for the outcome of that
 * comparison, or -1 with an exception set.  Exact bytes objects are
 * compared by identity, length, first byte, then memcmp; anything else
 * (except the None-vs-bytes shortcuts) falls back to rich comparison. */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        /* Same object: equal by definition. */
        return (equals == Py_EQ);
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        if (ps1[0] != ps2[0]) {
            /* Cheap first-byte check avoids memcmp for most mismatches. */
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        return (equals == Py_NE);
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}
/* UnicodeEquals */
/* Compare two objects for (in)equality with a unicode fast path.
 * `equals` is Py_EQ or Py_NE; returns 1/0, or -1 with an exception set.
 * On Python 2, a str operand is coerced to unicode first (owned_ref
 * tracks the temporary, released on every exit path); two non-unicode
 * operands delegate to the bytes comparison.  Exact unicode pairs are
 * compared by identity, length, kind, first code point, then a raw
 * memcmp of the canonical representation. */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
    PyObject* owned_ref = NULL;
#endif
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        goto return_eq;
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
        owned_ref = PyUnicode_FromObject(s2);
        if (unlikely(!owned_ref))
            return -1;
        s2 = owned_ref;
        s2_is_unicode = 1;
    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
        owned_ref = PyUnicode_FromObject(s1);
        if (unlikely(!owned_ref))
            return -1;
        s1 = owned_ref;
        s1_is_unicode = 1;
    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
        return __Pyx_PyBytes_Equals(s1, s2, equals);
    }
#endif
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length;
        int kind;
        void *data1, *data2;
        /* Ensure both strings are in canonical (PEP 393 ready) form. */
        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
            return -1;
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
            goto return_ne;
        }
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            /* Different storage widths cannot hold equal canonical text. */
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;
        } else if (length == 1) {
            goto return_eq;
        } else {
            /* kind == bytes per code point, so length*kind is the size. */
            int result = memcmp(data1, data2, (size_t)(length * kind));
            #if PY_MAJOR_VERSION < 3
            Py_XDECREF(owned_ref);
            #endif
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}
/* None */
/* Floor division for Py_ssize_t (Python `//` semantics): rounds toward
 * negative infinity, unlike C's `/` which truncates toward zero. */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
    Py_ssize_t quot = a / b;            /* truncated quotient */
    Py_ssize_t rem = a - quot * b;
    /* A non-zero remainder whose sign differs from b's means C rounded
     * up (toward zero); step the quotient down once to get the floor. */
    if (rem != 0 && ((rem ^ b) < 0))
        quot -= 1;
    return quot;
}
/* GetAttr */
/* getattr(o, n) with a CPython fast path: when the attribute name is a
 * plain str (unicode on Py3, str on Py2), use the optimized
 * __Pyx_PyObject_GetAttrStr; otherwise fall back to the generic API.
 * Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION >= 3
    if (likely(PyUnicode_Check(n)))
#else
    if (likely(PyString_Check(n)))
#endif
        return __Pyx_PyObject_GetAttrStr(o, n);
#endif
    return PyObject_GetAttr(o, n);
}
/* decode_c_string */
/* Decode the C string slice cstring[start:stop] to a unicode object.
 * Negative start/stop are interpreted Python-style relative to
 * strlen(cstring) (only computed in that case).  Decoding uses
 * `decode_func` when given (e.g. a codec-specific fast path), else
 * PyUnicode_Decode with `encoding`/`errors`.  An empty or inverted
 * slice yields an empty unicode string.  Returns a new reference or
 * NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        size_t slen = strlen(cstring);
        if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
            PyErr_SetString(PyExc_OverflowError,
                            "c-string too long to convert to Python");
            return NULL;
        }
        length = (Py_ssize_t) slen;
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;  /* clamp like Python slicing */
        }
        if (stop < 0)
            stop += length;
    }
    length = stop - start;
    if (unlikely(length <= 0))
        return PyUnicode_FromUnicode(NULL, 0);
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}
/* RaiseTooManyValuesToUnpack */
/* Raise the ValueError produced by unpacking an iterable that yielded
 * more than `expected` items. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Raise the ValueError produced by unpacking an iterable that ran out
 * after `index` items; %.1s appends "s" for the plural case. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
/* Raise the TypeError produced by attempting to unpack/iterate None. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
/* Check that `obj` is an instance of `type` (subclasses allowed),
 * raising TypeError "Cannot convert X to Y" otherwise.  Returns 1 on
 * success, 0 with an exception set on failure. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(PyObject_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Fast-path sys.exc_info() save: copy the thread state's currently
 * *handled* exception (exc_type/value/traceback, distinct from the
 * curexc_* "being raised" slots) into the out-params, taking new
 * references for the caller. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Fast-path restore of a previously saved handled-exception triple;
 * steals the passed references.  Old values are stashed before the
 * DECREFs so destructors never observe a half-updated thread state. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Fast-path PyErr_ExceptionMatches: test the pending exception in
 * `tstate` against `err` (identity first, then subclass/tuple match).
 * Returns 0 when no exception is pending. */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
    return PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
#endif
    /* sys.exc_info() at the start of an except block: fetch and
     * normalize the pending exception, hand the caller new references
     * in *type/*value/*tb, and also install the triple as the
     * thread-state's "currently handled" exception.  Returns 0 on
     * success; on failure returns -1 with the out-params zeroed. */
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    /* Inline PyErr_Fetch: move ownership out of the thread state. */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
    /* Normalization itself may have raised; treat that as failure. */
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* Extra references for the caller; the locals' references are then
     * donated to the handled-exception slots below. */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    /* DECREF old values only after the state is fully updated. */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
/* Swap the caller's exception triple with the thread state's currently
 * handled exception (exc_type/value/traceback).  Reference ownership
 * moves in both directions; no INCREF/DECREF is needed. */
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#else
/* Portable variant using the public exc-info API. */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#endif
/* Import */
/* Import module `name` with the given `from_list` and relative-import
 * `level` (semantics of __import__).  Returns a new module reference or
 * NULL with an exception set.
 *
 * level == -1 (Py3 only) emulates Py2's implicit relative import: if
 * this module lives in a package, first try a level-1 relative import
 * and swallow only ImportError before retrying as absolute (level 0).
 * Pre-3.3 goes through builtins.__import__; 3.3+ calls the C API
 * directly. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
    #if PY_VERSION_HEX < 0x03030000
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
    #endif
    if (from_list)
        list = from_list;
    else {
        /* __import__ requires a real (possibly empty) fromlist. */
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);  /* borrowed */
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
        #if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            if (strchr(__Pyx_MODULE_NAME, '.')) {
                /* In a package: try package-relative import first. */
                #if PY_VERSION_HEX < 0x03030000
                PyObject *py_level = PyInt_FromLong(1);
                if (!py_level)
                    goto bad;
                module = PyObject_CallFunctionObjArgs(py_import,
                    name, global_dict, empty_dict, list, py_level, NULL);
                Py_DECREF(py_level);
                #else
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
                #endif
                if (!module) {
                    /* Only ImportError triggers the absolute retry. */
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();
                }
            }
            level = 0;
        }
        #endif
        if (!module) {
            #if PY_VERSION_HEX < 0x03030000
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, NULL);
            Py_DECREF(py_level);
            #else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
            #endif
        }
    }
bad:
    #if PY_VERSION_HEX < 0x03030000
    Py_XDECREF(py_import);
    #endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
#include "frameobject.h"
/* Fast call of a pure-Python function with exactly `na` positional
 * arguments and no keywords, mirroring CPython's internal fast path:
 * build a frame directly, copy the args into f_localsplus (each arg
 * INCREF'd into the frame), and evaluate it.  Returns a new reference
 * or NULL. */
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
                                               PyObject *globals) {
    PyFrameObject *f;
    PyThreadState *tstate = PyThreadState_GET();
    PyObject **fastlocals;
    Py_ssize_t i;
    PyObject *result;
    assert(globals != NULL);
    /* XXX Perhaps we should create a specialized
       PyFrame_New() that doesn't take locals, but does
       take builtins without sanity checking them.
       */
    assert(tstate != NULL);
    f = PyFrame_New(tstate, co, globals, NULL);
    if (f == NULL) {
        return NULL;
    }
    fastlocals = f->f_localsplus;
    for (i = 0; i < na; i++) {
        Py_INCREF(*args);
        fastlocals[i] = *args++;
    }
    result = PyEval_EvalFrameEx(f,0);
    /* Temporarily raise the recursion depth so that the frame's
     * deallocation (which may run arbitrary destructors) does not trip
     * the recursion check — matches CPython's own fast-call code. */
    ++tstate->recursion_depth;
    Py_DECREF(f);
    --tstate->recursion_depth;
    return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
/* Fast call of a pure-Python function with positional args and an
 * optional kwargs dict.  Takes the no-keyword frame fast path when the
 * code object is simple (no *args/**kwargs/closure/kw-only args) and
 * the arg count matches exactly (or all-default, zero-arg call);
 * otherwise flattens kwargs into a key/value tuple and goes through
 * PyEval_EvalCodeEx.  Returns a new reference or NULL. */
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject *closure;
#if PY_MAJOR_VERSION >= 3
    PyObject *kwdefs;
#endif
    PyObject *kwtuple, **k;
    PyObject **d;
    Py_ssize_t nd;
    Py_ssize_t nk;
    PyObject *result;
    assert(kwargs == NULL || PyDict_Check(kwargs));
    nk = kwargs ? PyDict_Size(kwargs) : 0;
    if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
        return NULL;
    }
    if (
#if PY_MAJOR_VERSION >= 3
            co->co_kwonlyargcount == 0 &&
#endif
            likely(kwargs == NULL || nk == 0) &&
            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        /* Simple code object: frame fast path is safe. */
        if (argdefs == NULL && co->co_argcount == nargs) {
            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
            goto done;
        }
        else if (nargs == 0 && argdefs != NULL
                 && co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use default values as arguments .*/
            args = &PyTuple_GET_ITEM(argdefs, 0);
            result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
            goto done;
        }
    }
    if (kwargs != NULL) {
        /* Flatten the dict to [key0, val0, key1, val1, ...] as
         * PyEval_EvalCodeEx expects. */
        Py_ssize_t pos, i;
        kwtuple = PyTuple_New(2 * nk);
        if (kwtuple == NULL) {
            result = NULL;
            goto done;
        }
        k = &PyTuple_GET_ITEM(kwtuple, 0);
        pos = i = 0;
        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
            Py_INCREF(k[i]);
            Py_INCREF(k[i+1]);
            i += 2;
        }
        nk = i / 2;
    }
    else {
        kwtuple = NULL;
        k = NULL;
    }
    closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
    kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    }
    else {
        d = NULL;
        nd = 0;
    }
#if PY_MAJOR_VERSION >= 3
    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
                               args, nargs,
                               k, (int)nk,
                               d, (int)nd, kwdefs, closure);
#else
    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
                               args, nargs,
                               k, (int)nk,
                               d, (int)nd, closure);
#endif
    Py_XDECREF(kwtuple);
done:
    Py_LeaveRecursiveCall();
    return result;
}
#endif  // CPython < 3.6
#endif  // CYTHON_FAST_PYCALL
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
/* Invoke a METH_FASTCALL C function directly with an argument array,
 * bypassing tuple packing.  The asserts document the contract: the
 * callee must really be METH_FASTCALL and no exception may be pending
 * on entry.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
    PyObject *self = PyCFunction_GET_SELF(func);
    assert(PyCFunction_Check(func));
    assert(METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST)));
    assert(nargs >= 0);
    assert(nargs == 0 || args != NULL);
    /* _PyCFunction_FastCallDict() must not be called with an exception set,
       because it may clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!PyErr_Occurred());
    return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs, NULL);
}
#endif  // CYTHON_FAST_PYCCALL
/* GetItemInt */
/* Generic o[j] lookup used as the slow path of the integer-indexing
 * helpers.  Consumes (DECREFs) the index object `j`; a NULL `j` (failed
 * index construction upstream) propagates as NULL.  Returns a new
 * reference or NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *item = NULL;
    if (j) {
        item = PyObject_GetItem(o, j);
        Py_DECREF(j);
    }
    return item;
}
/* list[i] with optional Python-style negative-index wraparound and
 * optional bounds checking (both decided at compile time by the
 * generated caller).  Fast path uses the PyList macros and returns a
 * new reference; out-of-range or non-macro builds fall back to the
 * generic/sequence protocol. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, i);  /* borrowed */
        Py_INCREF(r);
        return r;
    }
    /* Out of range: let the generic path raise IndexError. */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* tuple[i] counterpart of __Pyx_GetItemInt_List_Fast: optional
 * wraparound and bounds checking, PyTuple macros on the fast path,
 * generic/sequence protocol otherwise.  Returns a new reference. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, i);  /* borrowed */
        Py_INCREF(r);
        return r;
    }
    /* Out of range: let the generic path raise IndexError. */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* o[i] for an arbitrary object with a C integer index.  Dispatches on
 * the runtime type: exact list/tuple use the macro fast paths; other
 * sequence types go through their sq_item slot (with manual negative-
 * index adjustment via sq_length, tolerating sizes that overflow by
 * clearing only OverflowError); anything else falls back to the
 * generic protocol.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);  /* borrowed */
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);  /* borrowed */
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* sq_length failed: only OverflowError is recoverable
                     * (sq_item may still handle the negative index). */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
/* Optimized `op1 + op2` where op2 is known at compile time to be the C
 * long `intval`.  Fast paths: Py2 int (with overflow detection falling
 * back to long addition), CPython long (reading up to 4 internal digits
 * directly, widening to long long when long is too narrow), and float.
 * Anything else defers to PyNumber_(InPlace)Add.  Returns a new
 * reference or NULL. */
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
    #if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long x;
        long a = PyInt_AS_LONG(op1);
            /* Add via unsigned to avoid signed-overflow UB, then detect
             * overflow by sign: safe iff x agrees in sign with a or b. */
            x = (long)((unsigned long)a + b);
            if (likely((x^a) >= 0 || (x^b) >= 0))
                return PyInt_FromLong(x);
            return PyLong_Type.tp_as_number->nb_add(op1, op2);
    }
    #endif
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        const long b = intval;
        long a, x;
#ifdef HAVE_LONG_LONG
        const PY_LONG_LONG llb = intval;
        PY_LONG_LONG lla, llx;
#endif
        /* Decode the PyLong's magnitude digits; Py_SIZE carries the
         * sign and digit count. */
        const digit* digits = ((PyLongObject*)op1)->ob_digit;
        const Py_ssize_t size = Py_SIZE(op1);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            /* NOTE(review): within each case, if neither the long nor the
             * long long width condition holds, control falls through to
             * the next case label; upstream Cython later annotated these
             * with CYTHON_FALLTHROUGH — confirm intended on this target. */
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                /* More than 4 digits: generic long addition. */
                default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
            }
        }
                x = a + b;
            return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
        long_long:
                llx = lla + llb;
            return PyLong_FromLongLong(llx);
#endif
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = PyFloat_AS_DOUBLE(op1);
            double result;
            PyFPE_START_PROTECT("add", return NULL)
            result = ((double)a) + (double)b;
            PyFPE_END_PROTECT(result)
            return PyFloat_FromDouble(result);
    }
    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None */
/* Raise UnboundLocalError for reading local `varname` before it is
 * assigned (matches CPython's interpreter message). */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* None */
/* Floor division for C long (Python `//` semantics): rounds toward
 * negative infinity, unlike C's `/` which truncates toward zero. */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
    long quot = a / b;                  /* truncated quotient */
    long rem = a - quot * b;
    /* A non-zero remainder whose sign differs from b's means C rounded
     * up (toward zero); step the quotient down once to get the floor. */
    if (rem != 0 && ((rem ^ b) < 0))
        quot -= 1;
    return quot;
}
/* WriteUnraisableException */
/* Report an exception that cannot be propagated (e.g. raised in a nogil
 * function or a destructor) via PyErr_WriteUnraisable, optionally printing
 * the full traceback first.  Acquires the GIL if `nogil` is set. */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback, CYTHON_UNUSED int nogil) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_PyThreadState_declare
#ifdef WITH_THREAD
    PyGILState_STATE state;
    if (nogil)
        state = PyGILState_Ensure();
    /* MSVC warns about a potentially-uninitialized `state`; the dummy value
     * is never passed to PyGILState_Release because `nogil` gates both. */
#ifdef _MSC_VER
    else state = (PyGILState_STATE)-1;
#endif
#endif
    __Pyx_PyThreadState_assign
    /* Take ownership of the current exception so we can restore it below. */
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        /* PyErr_PrintEx clears the error it prints, so print a duplicate
         * reference and keep the originals for the unraisable report. */
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
#if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
#else
    ctx = PyUnicode_FromString(name);
#endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        /* Could not build the context string; still report the exception. */
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
#ifdef WITH_THREAD
    if (nogil)
        PyGILState_Release(state);
#endif
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call a METH_O C function object directly with a single argument,
 * bypassing tuple packing.  Mirrors CPython's call protocol, including the
 * recursion-limit check and the "NULL result without error" sanity check. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    /* A C function must either return a value or set an exception. */
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
/* Slow path for one-argument calls: pack `arg` into a fresh 1-tuple and
 * dispatch through the generic call machinery.  Returns a new reference or
 * NULL on error. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    /* PyTuple_SET_ITEM steals a reference, so take one first. */
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
/* Call `func(arg)` choosing the fastest available protocol: vectorcall-style
 * fast call for plain Python functions, direct METH_O / METH_FASTCALL
 * dispatch for C functions, and the tuple-packing slow path otherwise. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
/* Cython-generated function objects also satisfy the C-function fast path. */
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
    if (likely(PyCFunction_Check(func))) {
#endif
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython fallback for `func(arg)`: pack the single argument into a
 * tuple and go through the generic call protocol.  Returns a new reference
 * or NULL on error. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *ret = NULL;
    PyObject *packed = PyTuple_Pack(1, arg);
    if (likely(packed)) {
        ret = __Pyx_PyObject_Call(func, packed, NULL);
        Py_DECREF(packed);
    }
    return ret;
}
#endif
/* SetVTable */
/* Store an extension type's C vtable pointer in its type dict under the
 * '__pyx_vtable__' key, wrapped in a capsule (or CObject on very old
 * Pythons).  Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}
/* CodeObjectCache */
/* Binary-search the sorted code-object cache for `code_line`.
 * Returns the index of a matching entry, or the index at which a new entry
 * should be inserted to keep the array sorted (possibly == count). */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    /* Empty cache: insertion position is 0.  Without this guard the
     * post-loop check below would read entries[0] out of bounds. */
    if (count <= 0) {
        return 0;
    }
    if (code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    /* start == end: `mid` holds the last probed index (0 if no iteration). */
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up a cached PyCodeObject for `code_line` in the module-global
 * __pyx_code_cache.  Returns a NEW reference, or NULL if absent
 * (code_line 0 is reserved as "no line" and never cached). */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    /* bisect returns an insertion point; verify it is an exact hit. */
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) the cache entry mapping `code_line` to `code_object`
 * in the sorted module-global cache.  Takes a new reference to the code
 * object it stores.  Allocation failures are silently ignored: the cache is
 * purely an optimization. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    /* First insertion: allocate the initial 64-entry array. */
    if (unlikely(!entries)) {
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    /* Exact hit: swap in the new code object, dropping the old reference. */
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    /* Grow in 64-entry steps when full; on failure, drop the insert. */
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* Shift the tail up one slot to keep the array sorted. */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal PyCodeObject whose filename/name/line number point at the
 * original Cython source, so that Python tracebacks show meaningful frames
 * for compiled code.  Returns a new reference, or NULL on failure. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
#else
    py_srcfile = PyUnicode_FromString(filename);
#endif
    if (!py_srcfile) goto bad;
    /* If a C line is known, embed "funcname (cfile:line)" in the frame name. */
    if (c_line) {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
    }
    else {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
#else
        py_funcname = PyUnicode_FromString(funcname);
#endif
    }
    if (!py_funcname) goto bad;
    /* All code/const/name fields are empty placeholders; only the metadata
     * (filename, function name, first line) matters for traceback display. */
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,   /*PyObject *filename,*/
        py_funcname,  /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a synthetic frame for the given Cython function/line to the
 * current exception's traceback.  Code objects are cached per line number
 * to avoid rebuilding them on every raise. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    /* Prefer the C line as cache key when available. */
    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        PyThreadState_GET(), /*PyThreadState *tstate,*/
        py_code,             /*PyCodeObject *code,*/
        __pyx_d,      /*PyObject *globals,*/
        0                    /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
/* Python 2 buffer acquisition: route to the native buffer protocol when the
 * object supports it, otherwise to this module's array/memoryview getbuffer
 * slots.  Returns 0 on success, -1 with TypeError set otherwise. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
        if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
        if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
/* Python 2 buffer release: use PyBuffer_Release for objects with native
 * buffer support; for this module's own exporters just drop the owner
 * reference and clear view->obj.  A NULL owner means nothing to release. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *owner = view->obj;
    if (owner) {
        if (PyObject_CheckBuffer(owner)) {
            PyBuffer_Release(view);
        } else {
            Py_DECREF(owner);
            view->obj = NULL;
        }
    }
}
#endif
/* MemviewSliceIsContig */
/* Test whether a memoryview slice is contiguous in the given order
 * ('C' or 'F') over its first `ndim` dimensions: each axis must be direct
 * (no suboffset) and its stride must equal the product of the sizes of the
 * faster-varying axes times the item size. */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs,
                             char order, int ndim)
{
    int d, axis;
    Py_ssize_t expected = mvs.memview->view.itemsize;
    for (d = 0; d < ndim; d++) {
        /* Fortran order walks axes 0..ndim-1; C order walks them reversed. */
        axis = (order == 'F') ? d : (ndim - 1 - d);
        if (mvs.suboffsets[axis] >= 0 || mvs.strides[axis] != expected)
            return 0;
        expected *= mvs.shape[axis];
    }
    return 1;
}
/* OverlappingSlices */
/* Compute the [start, end) memory extent covered by a memoryview slice.
 * Negative strides move the lower bound down; positive strides move the
 * upper bound up.  An empty slice (any zero extent) yields a zero-length
 * range at the current lower bound. */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    int dim;
    char *lo = slice->data;
    char *hi = slice->data;
    for (dim = 0; dim < ndim; dim++) {
        Py_ssize_t stride = slice->strides[dim];
        Py_ssize_t extent = slice->shape[dim];
        if (extent == 0) {
            *out_start = *out_end = lo;
            return;
        }
        if (stride > 0)
            hi += stride * (extent - 1);
        else
            lo += stride * (extent - 1);
    }
    *out_start = lo;
    /* `hi` points at the first byte of the last item; include that item. */
    *out_end = hi + itemsize;
}
/* Return nonzero when the memory ranges of two slices intersect
 * (standard half-open interval overlap test on their extents). */
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    void *a_lo, *a_hi, *b_lo, *b_hi;
    __pyx_get_array_memory_extents(slice1, &a_lo, &a_hi, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &b_lo, &b_hi, ndim, itemsize);
    return (a_lo < b_hi) && (b_lo < a_hi);
}
/* Capsule */
/* Wrap a raw C pointer in a capsule (PyCapsule on >= 2.7, PyCObject
 * before).  `sig` is the capsule name on new Pythons; it is ignored on the
 * legacy path.  Returns a new reference or NULL on failure. */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}
/* TypeInfoCompare */
/* Structural equality of two Cython type descriptors (1 = equal,
 * 0 = different).  Compares size/group/signedness/ndim, fixed array sizes,
 * and, for structs ('S'), flags and fields recursively.  'H' (host-endian
 * swap group) types compare by size alone. */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
    int i;
    if (!a || !b)
        return 0;
    if (a == b)
        return 1;
    if (a->size != b->size || a->typegroup != b->typegroup ||
            a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
        /* 'H' types only need matching storage size. */
        if (a->typegroup == 'H' || b->typegroup == 'H') {
            return a->size == b->size;
        } else {
            return 0;
        }
    }
    if (a->ndim) {
        for (i = 0; i < a->ndim; i++)
            if (a->arraysize[i] != b->arraysize[i])
                return 0;
    }
    if (a->typegroup == 'S') {
        if (a->flags != b->flags)
            return 0;
        if (a->fields || b->fields) {
            if (!(a->fields && b->fields))
                return 0;
            /* Walk the NULL-terminated field lists in lock step. */
            for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
                __Pyx_StructField *field_a = a->fields + i;
                __Pyx_StructField *field_b = b->fields + i;
                if (field_a->offset != field_b->offset ||
                    !__pyx_typeinfo_cmp(field_a->type, field_b->type))
                    return 0;
            }
            /* Equal only if both lists ended at the same index. */
            return !a->fields[i].type && !b->fields[i].type;
        }
    }
    return 1;
}
/* MemviewSliceValidateAndInit */
/* Validate one dimension's stride of an acquired buffer against the axis
 * spec requested by the memoryview type (contig/follow/ptr/full flags).
 * Returns 1 if acceptable, 0 with ValueError set otherwise.  Dimensions of
 * extent <= 1 always pass (their stride is irrelevant). */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    if (buf->shape[dim] <= 1)
        return 1;
    if (buf->strides) {
        if (spec & __Pyx_MEMVIEW_CONTIG) {
            /* Indirect-contiguous axes step by pointer size, direct ones
             * by item size. */
            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
                if (buf->strides[dim] != sizeof(void *)) {
                    PyErr_Format(PyExc_ValueError,
                                 "Buffer is not indirectly contiguous "
                                 "in dimension %d.", dim);
                    goto fail;
                }
            } else if (buf->strides[dim] != buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
        if (spec & __Pyx_MEMVIEW_FOLLOW) {
            /* 'Follow' axes only require |stride| >= itemsize. */
            Py_ssize_t stride = buf->strides[dim];
            if (stride < 0)
                stride = -stride;
            if (stride < buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
    } else {
        /* No strides array: the buffer claims C contiguity.  Only the last
         * dimension may be marked contiguous, and indirection is impossible. */
        if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (spec & (__Pyx_MEMVIEW_PTR)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (buf->suboffsets) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
/* Validate one dimension's suboffset against the requested axis spec:
 * DIRECT axes must not carry a suboffset, PTR axes must.  Returns 1 when
 * acceptable, 0 with ValueError set otherwise. */
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    int has_suboffsets = (buf->suboffsets != NULL);
    if ((spec & __Pyx_MEMVIEW_DIRECT) && has_suboffsets && buf->suboffsets[dim] >= 0) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer not compatible with direct access "
                     "in dimension %d.", dim);
        return 0;
    }
    if ((spec & __Pyx_MEMVIEW_PTR) && (!has_suboffsets || buf->suboffsets[dim] < 0)) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer is not indirectly accessible "
                     "in dimension %d.", dim);
        return 0;
    }
    return 1;
}
/* Verify full C or Fortran contiguity of a buffer when the memoryview type
 * demands it.  Walks the axes in stride-increasing order (forward for F,
 * backward for C), checking each stride equals the running element count
 * times the item size; axes of extent <= 1 are exempt.  Returns 1 on
 * success, 0 with ValueError set on mismatch. */
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int dim;
    Py_ssize_t expected;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        expected = 1;
        for (dim = 0; dim < ndim; dim++) {
            if (buf->shape[dim] > 1 &&
                buf->strides[dim] != expected * buf->itemsize)
            {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        expected = 1;
        for (dim = ndim - 1; dim >= 0; dim--) {
            if (buf->shape[dim] > 1 &&
                buf->strides[dim] != expected * buf->itemsize)
            {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    }
    return 1;
}
/* Acquire a buffer from `original_obj`, validate it against the requested
 * axis specs / contiguity / dtype, and initialize `memviewslice` from it.
 * Reuses the existing memoryview object when the source is already a
 * compatible memoryview; otherwise creates a new one.  Returns 0 on
 * success, -1 with an exception set on failure. */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    /* Fast path: source is already a memoryview of a matching dtype. */
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                original_obj)->typeinfo)) {
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (buf->ndim != ndim) {
        PyErr_Format(PyExc_ValueError,
                "Buffer has wrong number of dimensions (expected %d, got %d)",
                ndim, buf->ndim);
        goto fail;
    }
    /* Only freshly-acquired buffers need their format string parsed. */
    if (new_memview) {
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned) buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    /* Per-dimension stride and suboffset validation. */
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    /* Drop the memoryview only if this call created it. */
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
/* ObjectToMemviewSlice */
/* Convert a Python object to a 2-D direct/strided `double[:, :]` memoryview
 * slice.  None maps to a slice whose memview member is Py_None; conversion
 * failure yields a zeroed slice with an exception set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    int retcode;
    if (obj == Py_None) {
        /* Sentinel for a None slice; callers check before dereferencing. */
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
                                                 PyBUF_RECORDS, 2,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}
/* CIntToPy */
/* Convert a C int to a Python integer object, choosing the narrowest
 * CPython constructor that can hold the value; very wide types fall back
 * to _PyLong_FromByteArray.  The signedness test is a compile-time constant
 * folded away by the optimizer. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    const int neg_one = (int) -1, const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    /* Fallback: serialize the native bytes and let CPython rebuild them. */
    {
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}
/* CIntFromPyVerify */
/* Range-checked integer conversion helpers used by the CIntFromPy
 * functions.  They evaluate `func_value`, verify it round-trips through
 * `target_type` (jumping to the caller's raise_overflow /
 * raise_neg_overflow labels on loss), and return the narrowed value.
 * `is_unsigned` is expected to be in scope at the expansion site.
 * The _EXC variant additionally propagates a pending Python exception
 * signalled by a -1 return from the conversion call. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }
/* MemviewDtypeToObject */
/* Box the double stored at `itemp` as a Python float (new reference). */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
    const double *src = (const double *) itemp;
    return PyFloat_FromDouble(*src);
}
/* Store a Python number into the double at `itemp`.
 * Returns 1 on success, 0 if the conversion raised (the -1.0 sentinel plus
 * a pending exception signals failure). */
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
    double converted = __pyx_PyFloat_AsDouble(obj);
    if (converted == (double)-1 && PyErr_Occurred())
        return 0;
    *(double *) itemp = converted;
    return 1;
}
/* MemviewSliceCopyTemplate */
/* Copy a memoryview slice into a freshly allocated contiguous array
 * ('c' or 'fortran' per `mode`) and return a slice over the copy.
 * Indirect (suboffset) dimensions cannot be copied.  On failure the
 * returned slice has NULL memview/data and an exception is set. */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    for (i = 0; i < ndim; i++) {
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
                                           "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* Build the Python shape tuple describing the source extents. */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
            /* SET_ITEM steals the reference; clear our alias. */
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                    (PyObject *) array_obj, contig_flag,
                                    dtype_is_object,
                                    from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    /* Element-wise copy (handles object refcounting when dtype is object). */
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
                                                dtype_is_object) < 0))
        goto fail;
    goto no_fail;
fail:
    __Pyx_XDECREF(new_mvs.memview);
    new_mvs.memview = NULL;
    new_mvs.data = NULL;
no_fail:
    __Pyx_XDECREF(shape_tuple);
    __Pyx_XDECREF(temp_int);
    __Pyx_XDECREF(array_obj);
    __Pyx_RefNannyFinishContext();
    return new_mvs;
}
/* CIntFromPy */
/* Convert an arbitrary Python object to a C int with overflow checking.
 * Fast paths unpack small PyLongs (up to 4 digits) directly from the digit
 * array; wider values go through PyLong_As* with range verification, and
 * the widest through _PyLong_AsByteArray.  Non-integers are coerced via
 * __Pyx_PyNumber_IntOrLong first.  Returns (int)-1 with an exception set on
 * error (ambiguous with a legitimate -1; callers must check PyErr_Occurred). */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) -1, const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
    /* Python 2 fast path for plain PyInt. */
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Unsigned target: unpack 1-4 ob_digit words directly. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
            /* Negative PyLong cannot convert to an unsigned target. */
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: the same digit unpacking, negative cases too. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        /* Widest case: serialize via _PyLong_AsByteArray. */
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an integer: coerce through __int__/__index__ and retry. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}
/* CIntToPy */
/* Convert a C long to a Python integer object.  Same template as
 * __Pyx_PyInt_From_int; the self-comparisons like sizeof(long) < sizeof(long)
 * are constant-folded dead branches of the generated template. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    const long neg_one = (long) -1, const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    /* Fallback: serialize the native bytes and let CPython rebuild them. */
    {
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
/* Convert an arbitrary Python object to a C char with overflow checking.
 * Same template as __Pyx_PyInt_As_int instantiated for char; note that
 * char's signedness is implementation-defined, so `is_unsigned` is computed
 * at compile time.  Returns (char)-1 with an exception set on error
 * (ambiguous with a legitimate -1; callers must check PyErr_Occurred). */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
    const char neg_one = (char) -1, const_zero = (char) 0;
    const int is_unsigned = neg_one > const_zero;
    /* Python 2 fast path for plain PyInt. */
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(char) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (char) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Unsigned target: unpack 1-4 ob_digit words directly. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (char) 0;
                case  1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
                case 2:
                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
                            return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
                            return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
                            return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
            }
#endif
            /* Negative PyLong cannot convert to an unsigned target. */
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (char) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(char) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: the same digit unpacking, negative cases too. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (char) 0;
                case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(char,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                            return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                            return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
                            return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(char) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        /* Widest case: serialize via _PyLong_AsByteArray. */
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            char val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (char) -1;
        }
    } else {
        /* Not an integer: coerce through __int__/__index__ and retry. */
        char val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (char) -1;
        val = __Pyx_PyInt_As_char(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to char");
    return (char) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to char");
    return (char) -1;
}
/* CIntFromPy */
/* Convert an arbitrary Python number object to a C `long`.
 *
 * Cython-generated (CIntFromPy utility).  Strategy, in order:
 *   1. Python 2 `int` fast path (PyInt_AS_LONG).
 *   2. `PyLong` fast paths that read the internal digit array directly
 *      (CYTHON_USE_PYLONG_INTERNALS) for |ob_size| <= 4.
 *   3. Generic PyLong_As*() conversion, verified by __PYX_VERIFY_RETURN_INT_EXC.
 *   4. Huge values: _PyLong_AsByteArray into the raw bytes of `val`.
 *   5. Non-int objects: coerce via __Pyx_PyNumber_IntOrLong and recurse.
 * Returns (long)-1 with a Python exception set on error (OverflowError on
 * out-of-range input, TypeError on non-numbers). */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) -1, const_zero = (long) 0;
    /* compile-time check: does `long` behave as an unsigned type here? */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    /* Python 2 `int` fast path */
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            /* dead branch for target==long; kept by the code generator for other widths */
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* fast path: assemble the value from the PyLong digit array */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (long) 0;
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
            /* unsigned target: negative input is always an error */
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* no ob_size access outside CPython: compare against False (== 0) */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* signed target: digit fast paths for |ob_size| <= 4, both signs */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* last resort for values wider than long long: copy the raw bytes */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                /* runtime endianness probe */
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* not an int at all: coerce with __int__/__index__ machinery, then recurse */
        long val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "can't convert negative value to long");
    return (long) -1;
}
/* CheckBinaryVersion */
/* Warn when the Python X.Y this module was compiled against differs from the
 * Python X.Y it is imported into (returns the result of PyErr_WarnEx, or 0).
 *
 * Fix: the previous implementation rendered "%d.%d" into char[4] buffers and
 * compared single characters at fixed indices [0] and [2].  That truncates
 * two-digit minor versions ("3.10" needs 5 bytes incl. NUL) and the fixed
 * indices misparse them, so the check silently broke for Python >= 3.10.
 * Compare the "major.minor" prefix of Py_GetVersion() string-wise instead,
 * mirroring the upstream Cython fix. */
static int __Pyx_check_binary_version(void) {
    char ctversion[5];
    int same = 1, i, found_dot;
    const char* rt_from_call = Py_GetVersion();
    PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    found_dot = 0;
    for (i = 0; i < 4; i++) {
        if (!ctversion[i]) {
            /* compile-time string ended: runtime matches only if it does not
             * continue with another digit (e.g. "3.1" vs "3.10") */
            same = (rt_from_call[i] < '0' || rt_from_call[i] > '9');
            break;
        }
        if (rt_from_call[i] != ctversion[i]) {
            same = 0;
            break;
        }
    }
    if (!same) {
        char rtversion[5] = {'\0'};
        char message[200];
        /* extract the runtime "major.minor" prefix for the warning text */
        for (i = 0; i < 4; ++i) {
            if (rt_from_call[i] == '.') {
                if (found_dot) break;
                found_dot = 1;
            } else if (rt_from_call[i] < '0' || rt_from_call[i] > '9') {
                break;
            }
            rtversion[i] = rt_from_call[i];
        }
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}
/* InitStrings */
/* Intern/create all string constants of the module from the generated string
 * table.  `t` is a NULL-terminated array; each entry describes one constant
 * (raw bytes `s`, size `n` including NUL, flags, and the target slot `*t->p`).
 * Returns 0 on success, -1 with an exception set on allocation failure. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
#if PY_MAJOR_VERSION < 3
        /* Python 2: unicode constants are decoded, byte strings may be interned */
        if (t->is_unicode) {
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
#else
        /* Python 3: str constants become unicode (interned or decoded), the
         * rest become bytes objects */
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
#endif
        if (!*t->p)
            return -1;
        ++t;
    }
    return 0;
}
/* Create a Python unicode object from a NUL-terminated C string. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    const Py_ssize_t n_bytes = (Py_ssize_t) strlen(c_str);
    return __Pyx_PyUnicode_FromStringAndSize(c_str, n_bytes);
}
/* Borrowed-pointer string extraction when the caller does not need the length. */
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t discarded_length;
    return __Pyx_PyObject_AsStringAndSize(o, &discarded_length);
}
/* Return a borrowed `char*` view of a Python string-like object and store its
 * length in *length.  Handles unicode (via the default-encoding fast path on
 * CPython), bytearray, and bytes; returns NULL with an exception set on error.
 * The returned pointer borrows storage owned by `o` (or its cached encoded
 * form) and is only valid while `o` is alive. */
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            /* only use this fast path if sys.getdefaultencoding() is ASCII */
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
        /* pre-3.3: go through the cached default-encoded bytes object */
        char* defenc_c;
        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
        if (!defenc) return NULL;
        defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        {
            /* reject non-ASCII bytes; the failing call sets the proper
             * UnicodeEncodeError as a side effect */
            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
            char* c;
            for (c = defenc_c; c < end; c++) {
                if ((unsigned char) (*c) >= 128) {
                    PyUnicode_AsASCIIString(o);
                    return NULL;
                }
            }
        }
#endif
        *length = PyBytes_GET_SIZE(defenc);
        return defenc_c;
#else
        /* 3.3+: use the flexible string representation (PEP 393) */
        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        if (PyUnicode_IS_ASCII(o)) {
            /* ASCII strings share their UTF-8 representation */
            *length = PyUnicode_GET_LENGTH(o);
            return PyUnicode_AsUTF8(o);
        } else {
            /* sets UnicodeEncodeError as a side effect */
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
#else
        return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
    } else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        /* generic fallback: bytes (or anything PyBytes_AsStringAndSize accepts) */
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth-test a Python object.  The three singletons True/False/None are
 * answered by identity without calling into the interpreter; everything else
 * falls back to PyObject_IsTrue (which may return -1 on error). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True) return 1;
    if ((x == Py_False) | (x == Py_None)) return 0;
    return PyObject_IsTrue(x);
}
/* Coerce an arbitrary object to a Python integer object (int on Py3,
 * int-or-long on Py2).  Returns a new reference, or NULL with TypeError set.
 * Exact integers are passed through with an incref; other objects are coerced
 * through their number-protocol slots, and the result type is validated. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
    PyNumberMethods *m;
#endif
    const char *name = NULL;  /* slot name, used only in the error message */
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        /* already an integer: just return a new reference */
        return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    /* Python 2: prefer __int__, fall back to __long__ */
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
#else
    /* no direct slot access (e.g. PyPy): use the generic protocol call */
    res = PyNumber_Int(x);
#endif
    if (res) {
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            /* the slot returned a non-integer: reject it */
            PyErr_Format(PyExc_TypeError,
                         "__%.4s__ returned non-%.4s (type %.200s)",
                         name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    }
    else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}
/* Convert an index-like Python object `b` to Py_ssize_t.
 * Fast paths: Python 2 `int`, and small `PyLong`s read directly from the
 * internal digit array.  Everything else goes through PyNumber_Index().
 * Returns -1 with an exception set on error (note: -1 is also a valid value;
 * callers must check PyErr_Occurred as usual for this CPython convention).
 *
 * Fix: the Python 2 narrow-Py_ssize_t branch called PyInt_AsSsize_t(x) where
 * `x` is still uninitialized at that point — undefined behavior.  It must
 * operate on the input object `b` (matches the later upstream Cython fix).
 * The branch is dead on common LP64/LLP64 platforms, which is why it went
 * unnoticed. */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
    Py_ssize_t ival;
    PyObject *x;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(b))) {
        if (sizeof(Py_ssize_t) >= sizeof(long))
            return PyInt_AS_LONG(b);
        else
            return PyInt_AsSsize_t(b);  /* was PyInt_AsSsize_t(x): uninitialized pointer */
    }
#endif
    if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
        /* read small values straight out of the PyLong digit array */
        const digit* digits = ((PyLongObject*)b)->ob_digit;
        const Py_ssize_t size = Py_SIZE(b);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            ival = likely(size) ? digits[0] : 0;
            if (size == -1) ival = -ival;
            return ival;
        } else {
            switch (size) {
                case 2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case 3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case 4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
            }
        }
#endif
        return PyLong_AsSsize_t(b);
    }
    /* generic path: ask the object for its __index__ */
    x = PyNumber_Index(b);
    if (!x) return -1;
    ival = PyInt_AsSsize_t(x);
    Py_DECREF(x);
    return ival;
}
/* Wrap a C size_t value in a Python integer object (new reference).
 * Thin alias kept for the generated code's naming scheme. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
general_basis_get_amp.h | #ifndef _GENERAL_BASIS_GET_AMP_H
#define _GENERAL_BASIS_GET_AMP_H
//#include <limits>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"
#include "openmp.h"
//#include <complex>
namespace basis_general {
template<class I,class P=signed char>
// Accumulate the symmetry phase factor connecting representative state `r`
// to target state `s`: recursively iterates over all group elements (one
// recursion level per symmetry/transformation), and whenever the transformed
// representative equals `s` adds  sign * exp(-i k)  where k is the accumulated
// momentum phase of that group element.  Returns the summed complex phase.
// NOTE: `k`, `r` and `sign` are deliberately passed/updated by value so each
// recursion level owns its running phase and transformed state.
std::complex<double> get_amp_rep(general_basis_core<I,P> *B,
		const int nt,               // number of transformations (symmetries)
		I r,                        // representative state; iterated over all transformations
		const I s,                  // target state to find the amplitude of
		double k = 0.0,             // accumulated momentum phase so far
		P sign = 1,                 // accumulated fermionic/antiunitary sign so far
		const int depth = 0         // current symmetry index (recursion depth)
	)
{
	if(nt<=0){
		// no symmetries: the amplitude factor is trivially 1
		return 1.0;
	}
	std::complex<double> phase_factor = 0.0;
	const int per = B->pers[depth];                  // periodicity of this symmetry
	const double q = (2.0*M_PI*B->qs[depth])/per;    // phase increment per application
	if(depth < nt-1){
		// inner node: recurse into the remaining symmetries for each power
		// of the current transformation
		for(int j=0;j<per;j++){
			phase_factor += get_amp_rep(B,nt,r,s,k,sign,depth+1);
			k += q;
			r = B->map_state(r,depth,sign);  // apply transformation; may flip sign
		}
	}
	else{
		// leaf: compare each transformed representative against the target
		for(int j=0;j<per;j++){
			if(r==s){
				phase_factor += double(sign)*std::exp(std::complex<double>(0,-k));
			}
			k += q;
			r = B->map_state(r,depth,sign);
		}
	}
	return phase_factor;
}
template<class I,class J,class P=signed char>
// For each full-basis state s[i]: find its representative r, overwrite s[i]
// with r, and write into out[i] the amplitude of s[i] within the normalized
// symmetry-reduced state built on r (0 if r is not a valid representative).
// Parallelized over states with OpenMP.  Returns the first nonzero error code
// from check_imag (nonzero means a significantly imaginary amplitude was
// produced where the output type J cannot hold it).
int get_amp_general(general_basis_core<I,P> *B,
		I s[],              // input states in the full basis; overwritten with representatives
		J out[],            // state amplitudes of state s (full basis)
		const npy_intp Ns   // length of above arrays (should be the same)
	)
{
	int err=0;
	double per_factor = 1.0;  // product of symmetry periodicities = group size
	int q_sum = 0;            // sum of |quantum numbers|
	const int nt = B->get_nt();
	for(int i=0;i<nt;i++){
		per_factor *= B->pers[i];
		q_sum += std::abs(B->qs[i]);
	}
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1); // check_state has variable workload
	if(q_sum > 0 || B->fermionic){ // a non-zero quantum number, or fermionic basis => need a nontrivial phase_factor
		#pragma omp parallel for schedule(dynamic,chunk)
		for(npy_intp i=0;i<Ns;i++){
			// once any thread records an error, remaining iterations become no-ops
			if(err == 0){
				std::complex<double> phase_factor, out_tmp;
				int g[__GENERAL_BASIS_CORE__max_nt];  // group-element indices from ref_state
				P sign=1;
				I ss=s[i];
				I r = B->ref_state(ss,g,sign);
				double norm_r = B->check_state(r);
				s[i] = r; // update state with representative
				if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
					phase_factor = get_amp_rep(B,nt,r,ss);
					out_tmp = phase_factor/std::sqrt(norm_r * per_factor);
				}
				else{
					// r does not survive the symmetry projection: amplitude is 0
					out_tmp = 0.0;
				}
				int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
				if(local_err){
					#pragma omp critical
					err = local_err;
				}
			}
		}
	}
	else{
		// all quantum numbers zero and bosonic: the phase factor is real and
		// equals norm_r, so the amplitude reduces to sqrt(norm_r/per_factor)
		#pragma omp parallel for schedule(dynamic,chunk)
		for(npy_intp i=0;i<Ns;i++){
			if(err == 0){
				std::complex<double> phase_factor, out_tmp;
				int g[__GENERAL_BASIS_CORE__max_nt];
				P sign=1;
				I ss=s[i];
				I r = B->ref_state(ss,g,sign);
				double norm_r = B->check_state(r);
				s[i] = r; // update state with representative
				if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
					//phase_factor = get_amp_rep(B,nt,r,ss);
					out_tmp = std::sqrt(norm_r/per_factor);
				}
				else{
					out_tmp = 0.0;
				}
				int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
				if(local_err){
					#pragma omp critical
					err = local_err;
				}
			}
		}
	}
	return err;
}
// Same as get_amp_general, but without calling ref_state: the input states are
// assumed to already be representatives of the symmetry-reduced basis.
// NOTE(review): unlike get_amp_general, norm_r is not checked for NaN/<=0
// here, so a non-representative input yields NaN/inf — presumably callers
// guarantee valid representatives; confirm at the call sites.
template<class I,class J,class P=signed char>
int get_amp_general_light(general_basis_core<I,P> *B,
		I s[],              // input states in the symmetry-reduced basis
		J out[],            // state amplitudes of state s (symmetry-reduced basis)
		const npy_intp Ns   // length of above arrays (should be the same)
	)
{
	int err=0;
	double per_factor = 1.0;  // product of symmetry periodicities = group size
	int q_sum = 0;            // sum of |quantum numbers|
	const int nt = B->get_nt();
	for(int i=0;i<nt;i++){
		per_factor *= B->pers[i];
		q_sum += std::abs(B->qs[i]);
	}
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1); // check_state has variable workload
	if(q_sum > 0 || B->fermionic){ // a non-zero quantum number, or fermionic basis => need a nontrivial phase_factor
		#pragma omp parallel for schedule(dynamic,chunk)
		for(npy_intp i=0;i<Ns;i++){
			if(err == 0){
				std::complex<double> phase_factor, out_tmp;
				I ss=s[i];
				double norm_r = B->check_state(ss);
				phase_factor = get_amp_rep(B,nt,ss,ss);
				out_tmp = phase_factor/std::sqrt(norm_r * per_factor);
				int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
				if(local_err){
					#pragma omp critical
					err = local_err;
				}
			}
		}
	}
	else{
		// trivial quantum numbers, bosonic: amplitude is real
		#pragma omp parallel for schedule(dynamic,chunk)
		for(npy_intp i=0;i<Ns;i++){
			if(err == 0){
				std::complex<double> phase_factor, out_tmp;
				double norm_r = B->check_state(s[i]);
				out_tmp = std::sqrt(norm_r/per_factor);
				int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
				if(local_err){
					#pragma omp critical
					err = local_err;
				}
			}
		}
	}
	return err;
}
}
#endif
|
VerletNeighborListAsBuild.h | /**
* @file VerletNeighborListAsBuild.h
* @author humig
* @date 21.05.19
*/
#pragma once
#include "AsBuildPairGeneratorFunctor.h"
#include "C08TraversalColorChangeNotify.h"
#include "autopas/containers/verletListsCellBased/verletLists/neighborLists/VerletNeighborListInterface.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* This class implements a neighbor list that remembers which thread added which particle pair and at which color
* during the build with C08 from LinkedCells.
* @tparam Particle The particle type the class uses.
*/
template <class Particle>
/**
 * This class implements a neighbor list that remembers which thread added which particle pair and at which color
 * during the build with C08 from LinkedCells.
 *
 * Fixes relative to the previous revision:
 *  - checkPair(): the "search all lists" fallback always re-tested the same
 *    list (_neighborList[_currentColor][currentThreadIndex]) instead of
 *    _neighborList[color][thread], making the loop dead code. It now really
 *    searches every color/thread list before declaring the list invalid.
 *  - _baseLinkedCells is now initialized to nullptr; previously the
 *    `_baseLinkedCells == nullptr` test in checkNeighborListValidity() read an
 *    indeterminate pointer (undefined behavior) when called before any build.
 *  - repaired mojibake identifiers (`¤tThreadList` -> `&currentThreadList`).
 *
 * @tparam Particle The particle type the class uses.
 */
class VerletNeighborListAsBuild : public VerletNeighborListInterface<Particle>, ColorChangeObserver {
  /**
   * Adds the generator functor for validation checks (template argument `true`) as friend so it can call checkPair().
   */
  friend class internal::AsBuildPairGeneratorFunctor<Particle, true>;
  /**
   * Adds the generator functor for adding pairs (template argument `false`) as friend so it can call addPair().
   */
  friend class internal::AsBuildPairGeneratorFunctor<Particle, false>;

 private:
  /**
   * Starts the generate functor. _baseLinkedCells has to be set before!
   * @tparam useNewton3 If the functor should use newton 3.
   * @tparam callCheckInstead If false, start it in generate mode, if true, in check validity mode.
   * @param cutoff The cutoff to use for the particle pairs in the functor.
   */
  template <bool useNewton3, bool callCheckInstead = false>
  void startFunctor(double cutoff) {
    internal::AsBuildPairGeneratorFunctor<Particle, callCheckInstead> functor(*this, cutoff);
    // Use SoA traversal for generation and AoS traversal for validation check.
    constexpr auto dataLayout = callCheckInstead ? DataLayoutOption::aos : DataLayoutOption::soa;
    auto traversal = C08TraversalColorChangeNotify<typename VerletListHelpers<Particle>::VerletListParticleCellType,
                                                   internal::AsBuildPairGeneratorFunctor<Particle, callCheckInstead>,
                                                   dataLayout, useNewton3>(
        _baseLinkedCells->getCellBlock().getCellsPerDimensionWithHalo(), &functor,
        _baseLinkedCells->getInteractionLength(), _baseLinkedCells->getCellBlock().getCellLength(), this);
    _baseLinkedCells->iteratePairwise(&traversal);
  }

 public:
  /**
   * This type represents the neighbor list that each thread has for each color.
   */
  using ThreadNeighborList = std::unordered_map<Particle *, std::vector<Particle *>>;
  /**
   * This type represents the thread lists for all colors.
   */
  using ColorNeighborList = std::vector<ThreadNeighborList>;
  /**
   * This type represents the SoA neighbor list that each thread has for each color.
   */
  using SoAThreadNeighborList = std::vector<std::vector<size_t, autopas::AlignedAllocator<size_t>>>;
  /**
   * This type represents the SoA thread lists for all colors.
   */
  using SoAColorNeighborList = std::vector<SoAThreadNeighborList>;

  /**
   * Constructor for the VerletNeighborListAsBuild. Does only default initialization.
   */
  VerletNeighborListAsBuild() : _neighborList{}, _baseLinkedCells(nullptr), _soaListIsValid(false) {}

  ContainerOption getContainerType() const override { return ContainerOption::varVerletListsAsBuild; }

  /**
   * @copydoc VerletNeighborListInterface::buildNeighborList()
   *
   * It executes C08 on the passed LinkedCells container and saves the resulting pairs in the neighbor list, remembering
   * the thread and current color for each pair.
   */
  void buildNeighborList(LinkedCells<typename VerletListHelpers<Particle>::VerletListParticleCellType,
                                     typename VerletListHelpers<Particle>::SoAArraysType> &linkedCells,
                         bool useNewton3) override {
    _soaListIsValid = false;
    _baseLinkedCells = &linkedCells;

    // Reset all per-color, per-thread lists before the rebuild.
    unsigned int maxNumThreads = autopas_get_max_threads();
    for (int c = 0; c < 8; c++) {
      std::vector<ThreadNeighborList> &colorList = _neighborList[c];
      colorList.resize(maxNumThreads);
      for (unsigned int i = 0; i < maxNumThreads; i++) {
        colorList[i].clear();
      }
    }

    if (useNewton3) {
      startFunctor<true>(linkedCells.getInteractionLength());
    } else {
      startFunctor<false>(linkedCells.getInteractionLength());
    }
  }

  bool checkNeighborListValidity(bool useNewton3, double cutoff) override {
    _allPairsPresent = true;
    // Without a previous build there is nothing to validate.
    if (_baseLinkedCells == nullptr) return false;

    constexpr bool callCheck = true;
    if (useNewton3) {
      startFunctor<true, callCheck>(cutoff);
    } else {
      startFunctor<false, callCheck>(cutoff);
    }

    return _allPairsPresent;
  }

  /**
   * Returns the internal AoS neighbor list. Should be used by traversals.
   *
   * The internal neighbor list is a vector of the neighbor lists of each color. Each of those neighbor lists is a
   * vector that contains one neighbor list for each thread. Each of those neighbor lists is a map from each particle to
   * a vector containing its neighbors.
   *
   * @return the internal AoS neighbor list.
   */
  const auto &getInternalNeighborList() { return _neighborList; }

  /**
   * Returns the internal SoA neighbor list. Should be used by traversals.
   *
   * The internal SoA neighbor list is a vector of the SoA neighbor lists of each color. Each of those SoA neighbor
   * lists is a vector that contains one SoA neighbor list for each thread. Each of those SoA neighbor lists is a vector
   * of vectors where the i-th vector contains the indices of all neighbors of particle i in the SoA.
   *
   * @return the internal SoA neighbor list.
   */
  const auto &getInternalSoANeighborList() { return _soaNeighborList; }

  void receiveColorChange(unsigned long newColor) override { _currentColor = newColor; }

  /**
   * @see getInternalSoANeighborList()
   */
  void generateSoAFromAoS() override {
    // Generate a map from particle pointer to particle index in the SoA. This works because during loadSoA() the
    // particles are loaded in the same order.
    std::unordered_map<Particle *, size_t> _aos2soaMap;
    _aos2soaMap.reserve(_baseLinkedCells->getNumParticles());
    size_t i = 0;
    for (auto iter = _baseLinkedCells->begin(); iter.isValid(); ++iter, ++i) {
      _aos2soaMap[&(*iter)] = i;
    }

    constexpr int numColors = 8;
    for (int color = 0; color < numColors; color++) {
      unsigned int numThreads = _neighborList[color].size();
      _soaNeighborList[color].resize(numThreads);
#if defined(AUTOPAS_OPENMP)
      // One thread translates exactly its own AoS list into index form.
#pragma omp parallel for schedule(static) num_threads(numThreads)
#endif
      for (unsigned int thread = 0; thread < numThreads; thread++) {
        auto &currentThreadList = _soaNeighborList[color][thread];
        currentThreadList.clear();
        currentThreadList.resize(_aos2soaMap.size());
        for (const auto &pair : _neighborList[color][thread]) {
          size_t indexFirst = _aos2soaMap[pair.first];
          for (const auto &second : pair.second) {
            size_t indexSecond = _aos2soaMap[second];
            currentThreadList[indexFirst].push_back(indexSecond);
          }
        }
      }
    }

    _soaListIsValid = true;
  }

  /**
   * Loads the particle information in the SoA and returns a pointer to the filled SoA.
   * @tparam TFunctor The type of the functor to use for loading the particles.
   * @param f The functor to use for loading the particles.
   * @return A pointer to the SoA filled. Ownership is *not* passed.
   */
  template <class TFunctor>
  auto *loadSoA(TFunctor *f) {
    _soa.clear();
    size_t offset = 0;
    for (auto &cell : _baseLinkedCells->getCells()) {
      f->SoALoader(cell, _soa, offset);
      offset += cell.numParticles();
    }
    return &_soa;
  }

  /**
   * Extracts the particle information out of the SoA returned by loadSoA() before.
   * @tparam TFunctor The type of the functor to use for extracting the particles.
   * @param f The functor to use for extracting the particles.
   */
  template <class TFunctor>
  void extractSoA(TFunctor *f) {
    size_t offset = 0;
    for (auto &cell : _baseLinkedCells->getCells()) {
      f->SoAExtractor(cell, _soa, offset);
      offset += cell.numParticles();
    }
  }

  bool isSoAListValid() const override { return _soaListIsValid; }

  long getNumberOfNeighborPairs() const override {
    long numPairs = 0;
    for (const auto &colorList : _neighborList) {
      for (const auto &threadList : colorList) {
        numPairs += threadList.size();
      }
    }
    return numPairs;
  }

 private:
  /**
   * Stores a pair in the list of the calling thread under the current color.
   * Called from the pair generator functor during the build.
   */
  void addPair(Particle *first, Particle *second) {
    int currentThreadIndex = autopas_get_thread_num();
    _neighborList[_currentColor][currentThreadIndex][first].push_back(second);
  }

  /**
   * Verifies that a pair found by the validation traversal is present somewhere in the stored lists.
   * Called from the pair generator functor in check mode.
   *
   * Fast path: the list the pair would be in if no particle moved since the last build. If it is not there, all
   * other color/thread lists are searched (the pair may have been recorded by a different thread or color). Only if
   * the pair is found nowhere is the list marked invalid.
   */
  void checkPair(Particle *first, Particle *second) {
    int currentThreadIndex = autopas_get_thread_num();

    auto &oldThreadNeighborList = _neighborList[_currentColor][currentThreadIndex];
    if (isPairInList(oldThreadNeighborList, first, second)) {
      return;
    }
    for (int color = 0; color < 8; color++) {
      for (unsigned int thread = 0; thread < _neighborList[color].size(); thread++) {
        if (isPairInList(_neighborList[color][thread], first, second)) {
          return;
        }
      }
    }
    // this is thread safe, as _allPairsPresent is atomic
    _allPairsPresent = false;
  }

  /**
   * Helper method for checkPair().
   * @return True, if the pair is present, false otherwise.
   */
  bool isPairInList(ThreadNeighborList &currentNeighborList, Particle *first, Particle *second) {
    auto iteratorFound = std::find(currentNeighborList[first].begin(), currentNeighborList[first].end(), second);
    return iteratorFound != currentNeighborList[first].end();
  }

 private:
  /**
   * The internal AoS neighbor list. For format, see getInternalNeighborList().
   */
  std::array<ColorNeighborList, 8> _neighborList;

  /**
   * The LinkedCells object this neighbor list should use to build. nullptr until the first build.
   */
  LinkedCells<typename VerletListHelpers<Particle>::VerletListParticleCellType,
              typename VerletListHelpers<Particle>::SoAArraysType> *_baseLinkedCells;

  /**
   * The internal SoA neighbor list. For format, see getInternalSoANeighborList().
   */
  std::array<SoAColorNeighborList, 8> _soaNeighborList;

  /**
   * The SoA used.
   */
  SoA<typename Particle::SoAArraysType> _soa;

  /**
   * If the SoA is valid, see isSoAListValid().
   */
  bool _soaListIsValid;

  /**
   * The current color in the traversal during the build of the neighbor list.
   */
  static int _currentColor;
#if defined(AUTOPAS_OPENMP)
#pragma omp threadprivate(_currentColor)
#endif

  /**
   * Used in checkNeighborListValidity(). Set to false in the pair generating functor.
   */
  std::atomic<bool> _allPairsPresent;
};
// Out-of-class definition of the static (OpenMP-threadprivate) current-color
// tracker; one instance per Particle instantiation, initialized to color 0.
template <class Particle>
int VerletNeighborListAsBuild<Particle>::_currentColor = 0;
} // namespace autopas
|
GB_unaryop__lnot_fp64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_uint8
// op(A') function: GB_tran__lnot_fp64_uint8
// C type: double
// A type: uint8_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (double) !(Ax [p] != 0) for all p, i.e. apply LNOT with a
// uint8_t -> double typecast (see the GB_CAST_OP macro defined above).
// Auto-generated kernel: do not edit if this file lives in Generated/.
GrB_Info GB_unop__lnot_fp64_uint8
(
    double *Cx,             // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent: a static schedule balances the uniform work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot ((double) A'): transpose A, typecast, and apply the unary operator.
// The actual loop lives in the shared template GB_unaryop_transpose.c, which
// is specialized here via the GB_* macros defined above (phase 2 of 2).
// Auto-generated kernel: do not edit if this file lives in Generated/.
GrB_Info GB_tran__lnot_fp64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,     // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                          // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
GB_binop__lor_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint64)
// A*D function (colscale): GB (_AxD__lor_uint64)
// D*A function (rowscale): GB (_DxB__lor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint64)
// C=scalar+B GB (_bind1st__lor_uint64)
// C=scalar+B' GB (_bind1st_tran__lor_uint64)
// C=A+scalar GB (_bind2nd__lor_uint64)
// C=A'+scalar GB (_bind2nd_tran__lor_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) || (bij != 0))
// Kernel configuration macros for the LOR/uint64 binop family.
// Fix: GB_A_IS_PATTERN and GB_B_IS_PATTERN previously ended in a stray
// trailing backslash ("0 \"), which line-spliced the following comment
// into the macro body.  The expansion happened to stay "0" only because
// comments are stripped after splicing; the backslashes are removed so
// the macros are well-formed on their own.
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT64 || GxB_NO_LOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Intentionally dead code: LOR is not one of the ops supported by the
// dense C += A+B kernel, so the generator emits it under "#if 0".
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulator).
void GB (_Cdense_ewise3_noaccum__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B is pre-sliced into tasks.
GrB_Info GB (_Cdense_accumB__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// Fix: the original returned GrB_SUCCESS twice (once inside the inner
// scope, making the outer return unreachable dead code).  Keep a single
// return after the scope, matching the sibling _Cdense_accumB kernel.
GrB_Info GB (_Cdense_accumb__lor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork, // points to a uint64_t scalar
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B (optionally masked); the alpha/beta
// scalars are only read when is_eWiseUnion is true.
GrB_Info GB (_AaddB__lor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked), C sparse/hypersparse.
GrB_Info GB (_AemultB_08__lor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hyper, B bitmap/full.  LOR is commutative
// (GB_BINOP_FLIP is 0), so the flipxy argument is effectively ignored.
GrB_Info GB (_AemultB_02__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with a bitmap result matrix C.
GrB_Info GB (_AemultB_bitmap__lor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x || Bx [p]) for all entries present in B (Bb is B's bitmap,
// or NULL when B is full).
GrB_Info GB (_bind1st__lor_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] || y) for all entries present in A (Ab is A's bitmap,
// or NULL when A is full).
GrB_Info GB (_bind2nd__lor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined so GB_unop_transpose.c computes x LOR aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and bind the scalar x as the first operand.
GrB_Info GB (_bind1st_tran__lor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code emitted after this kernel
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined so GB_unop_transpose.c computes aij LOR y.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and bind the scalar y as the second operand.
GrB_Info GB (_bind2nd_tran__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    int carry;

    /* Normalize *y (it is modified!) so that x->tv_usec >= y->tv_usec
     * when x is the later time; whole seconds are moved between the
     * tv_sec and tv_usec fields. */
    if (x->tv_usec < y->tv_usec)
    {
        carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= carry * 1000000;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += carry * 1000000;
    }

    /* With y normalized, tv_usec of the difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Report whether x - y is negative. */
    return x->tv_sec < y->tv_sec ? 1 : 0;
}
/* Run the order-4 (25-point) 3D wave stencil benchmark.
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing (UB);
 *    all four arguments are now required.
 *  - roc2 was first malloc'd as a 1-element array and then leaked.
 *  - A[1] was never initialized and index 0 of A[0]/roc2 was skipped,
 *    so the first sweep read uninitialized memory.
 *  - A itself and tile_size were never freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+8;   /* +8 adds the 4-cell halo on each side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  /* A holds the two time planes of the field; roc2 the coefficient field. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize every cell, including index 0 and the second time plane. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  (void) num_threads;  /* may be consumed by PRINT_RESULTS in some builds */

  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);          /* was leaked before */
  free(roc2);
  free(tile_size);  /* was leaked before */
  return 0;
}
|
linearsearchOpenMp.c | /*AUTHOR - SATYAM RAMAWAT
Date - 12th May 2020
OpenMP program that processes all of the elements of a list in parallel.
Performed by the Linear search algorithm
Program has been run in MacOS 3.1 GHz Dual-Core Intel Core i5
Used: Clang
clang -Xpreprocessor -fopenmp -lomp linearsearchOpenMp.c -o ls
*/
#include<stdio.h>
#include <omp.h>
/* Parallel linear search over a[0..n-1] with OpenMP.
 * Fixes vs. the original:
 *  - scanf results were ignored (n/search could be used uninitialized);
 *  - n was not bounded, overflowing the fixed-size array a[100];
 *  - the unsynchronized "found = 1" stores were a data race (UB). */
int main()
{
	int a[100],n,i,search,found=0;

	printf("Enter the number of elements:");
	if (scanf("%d",&n) != 1 || n < 0 || n > 100)
	{
		fprintf(stderr, "invalid element count (expected 0..100)\n");
		return 1;
	}
	for(i=0;i<n;i++)
	{
		a[i]=i;
		printf("%d",a[i]);
	}
	printf("Enter the number to search:");
	if (scanf("%d",&search) != 1)
	{
		fprintf(stderr, "invalid search value\n");
		return 1;
	}
#pragma omp parallel for num_threads(50)
	for(i=0;i<n;i++)
	{
		if(a[i]==search)
		{
			printf("The element is present at location %d",i+1);
			/* atomic store: several threads may report a match */
#pragma omp atomic write
			found=1;
		}
	}
	if(found==0)
	{
		printf("The element is not found");
	}
	return 0;
}
VerletClusterListsTest.h | /**
* @file VerletClusterListsTest.h
* @author nguyen
* @date 21.10.18
*/
#pragma once
#include <gtest/gtest.h>
#include "AutoPasTestBase.h"
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/verletClusterLists/traversals/VerletClustersColoringTraversal.h"
#include "autopas/particles/Particle.h"
#include "autopas/utils/WrapOpenMP.h"
#include "autopasTools/generators/RandomGenerator.h"
#include "mocks/MockFunctor.h"
#include "testingHelpers/commonTypedefs.h"
// Empty fixture; the test cases live in the corresponding .cpp file.
class VerletClusterListsTest : public AutoPasTestBase {};
#if defined(AUTOPAS_OPENMP)
// Functor that records, per traversal color and per OpenMP thread, which
// particles each thread touched.  Used to verify that the coloring
// traversal never lets two threads of the same color share a particle.
class CollectParticlesPerThreadFunctor
    : public autopas::Functor<autopas::Particle, autopas::FullParticleCell<autopas::Particle>> {
 public:
  // Color currently being processed; thread-local so each thread files
  // its particles under the color it is working on.
  static int _currentColor;
#pragma omp threadprivate(_currentColor)
  // [color][thread] -> set of particles touched (8 = max number of colors).
  std::array<std::vector<std::set<Particle *>>, 8> _particlesPerThreadPerColor;
 public:
  CollectParticlesPerThreadFunctor() : Functor(0) {}
  void initTraversal() override {
    // One particle set per thread, for every color.
    for (int i = 0; i < 8; i++) {
      _particlesPerThreadPerColor[i].resize(autopas::autopas_get_max_threads());
    }
  }
  void AoSFunctor(Particle &i, Particle &j, bool newton3) override {
    // Record both interaction partners under the calling thread's slot.
    auto threadNum = autopas::autopas_get_thread_num();
    _particlesPerThreadPerColor[_currentColor][threadNum].insert(&i);
    _particlesPerThreadPerColor[_currentColor][threadNum].insert(&j);
  }
  bool isRelevantForTuning() override { return false; }
  bool allowsNewton3() override { return true; }
  bool allowsNonNewton3() override { return true; }
  bool isAppropriateClusterSize(unsigned int clusterSize, autopas::DataLayoutOption::Value dataLayout) const override {
    return dataLayout == autopas::DataLayoutOption::aos;  // this functor supports clusters only for aos!
  }
  // Called (via the traversal below) whenever the traversal moves on to a
  // new color; updates every thread's thread-local copy.
  static void nextColor(int newColor) { _currentColor = newColor; }
};
int CollectParticlesPerThreadFunctor::_currentColor = 0;
// Coloring traversal that invokes a user-supplied callback each time the
// traversal switches to a new color (used to drive nextColor() above).
class ColoringTraversalWithColorChangeNotify
    : public autopas::VerletClustersColoringTraversal<FPCell, CollectParticlesPerThreadFunctor,
                                                      autopas::DataLayoutOption::aos, true> {
 public:
  ColoringTraversalWithColorChangeNotify(CollectParticlesPerThreadFunctor *functor,
                                         std::function<void(int)> whenColorChanges)
      : autopas::VerletClustersColoringTraversal<FPCell, CollectParticlesPerThreadFunctor,
                                                 autopas::DataLayoutOption::aos, true>(functor) {
    _whenColorChanges = std::move(whenColorChanges);
  }
  void notifyColorChange(unsigned long newColor) override { _whenColorChanges(newColor); }

 private:
  // Callback invoked with the new color index.
  std::function<void(int)> _whenColorChanges;
};
#endif |
mysql_netauth_fmt_plug.c | /* Cracker for MySQL network authentication hashes. Hacked together
* during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mysqlna;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mysqlna);
#else
#include "sha.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024// tuned K8-dual HT
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "mysqlna"
#define FORMAT_NAME "MySQL Network Authentication"
#define FORMAT_TAG "$mysqlna$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define HEX_LENGTH 40
#define CIPHERTEXT_LENGTH 90
#define BINARY_SIZE 20
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_NONE
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// Self-test vectors: "$mysqlna$" <20-byte scramble hex> "*" <20-byte token hex>.
static struct fmt_tests mysqlna_tests[] = {
{"$mysqlna$2D52396369653E4626293B2F75244D3871507A39*7D63098BEE381A51AA6DF11E307E46BD4F8B6E0C", "openwall"},
{"$mysqlna$615c2b5e79656f7d4931594e5b5d416c7b483365*c3a70da2874db890eb2f0a5e3ea80b2ed17da0d0", "openwall"},
{"$mysqlna$295a687c59275452214b366b39776d3f31757b2e*7343f45c94cccd646a1b29bbfad064a9ee5c0380", "overlord magnum"},
{NULL}
};
// Per-candidate plaintext and computed-hash buffers, sized in init().
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
// Current salt: the 20-byte server scramble.
static struct custom_salt {
unsigned char scramble[20];
} *cur_salt;
/* Allocate the per-candidate key/hash buffers, scaling the batch size by
 * the number of OpenMP threads (times OMP_SCALE) when OpenMP is enabled. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
	omp_t = threads;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
// Validate a ciphertext: tag, then exactly HEX_LENGTH hex digits, '*',
// and exactly HEX_LENGTH more hex digits (atoi16[c] == 0x7F marks a
// non-hex character).  Returns 1 if well-formed, 0 otherwise.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
p = ciphertext + FORMAT_TAG_LEN;
q = strstr(ciphertext, "*");
if (!q)
return 0;
// scramble field must be exactly HEX_LENGTH characters long
if (q - p != HEX_LENGTH)
return 0;
// ... and all of them hex digits (p ends up at q when they are)
while (atoi16[ARCH_INDEX(*p)] != 0x7F && p < q)
p++;
if (q - p != 0)
return 0;
// p now points at '*'; ensure the token part is long enough
if (strlen(p) < HEX_LENGTH)
return 0;
// scan the token: must be hex digits up to the terminating NUL
q = p + 1;
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
return !*q && q - p - 1 == HEX_LENGTH;
}
/* Canonicalize a ciphertext (lowercase hex).
 * Fix: strncpy does not NUL-terminate when the input fills the buffer,
 * so strlwr() could run off the end of `out`; terminate explicitly. */
static char* split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	strncpy(out, ciphertext, sizeof(out));
	out[sizeof(out) - 1] = 0;
	strlwr(out);
	return out;
}
// Decode the 20-byte scramble (first hex field) into a static salt struct.
// Input has already passed valid(), so the field is known to be 40 hex digits.
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
static struct custom_salt cs;
ctcopy += FORMAT_TAG_LEN; /* skip over "$mysqlna$" */
p = strtokm(ctcopy, "*");
// hex decode: two digits per output byte
for (i = 0; i < 20; i++)
cs.scramble[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
// Decode the token (hex field after the last '*') into a static,
// word-aligned BINARY_SIZE byte buffer.
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy; // forces word alignment of c[]
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '*') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
// Hash-table lookup helpers: successively wider masks of the first word
// of the computed hash (used by the cracker's hash tables).
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
// Select the salt (server scramble) used by subsequent crypt_all() calls.
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Compute the MySQL network-auth token for every queued candidate:
 *   token = SHA1(scramble || SHA1(SHA1(password))) XOR SHA1(password)
 * Fix: the `for` loop was inside #ifdef _OPENMP, so non-OpenMP builds
 * only ever processed index 0; only the pragma is now conditional. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		unsigned char stage1_hash[20];
		unsigned char inner_hash[20];
		unsigned char token[20];
		SHA_CTX ctx;
		int i;
		unsigned char *p = (unsigned char*)crypt_out[index];

		/* stage1 = SHA1(password) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA1_Final(stage1_hash, &ctx);
		/* inner = SHA1(stage1) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, stage1_hash, 20);
		SHA1_Final(inner_hash, &ctx);
		/* token = SHA1(scramble || inner) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->scramble, 20);
		SHA1_Update(&ctx, inner_hash, 20);
		SHA1_Final(token, &ctx);
		/* what the client sends on the wire: token XOR stage1 */
		for (i = 0; i < 20; i++) {
			p[i] = token[i] ^ stage1_hash[i];
		}
	}
	return count;
}
/* Quick reject: does any computed hash match the binary's first machine
 * word?  (cmp_one does the full BINARY_SIZE comparison.)
 * Fix: the loop was inside #ifdef _OPENMP, so non-OpenMP builds only
 * compared index 0; the loop now always covers all `count` entries. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
// Full-width comparison for a single candidate.
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// crypt_all computes the complete hash, so cmp_one is already exact.
static int cmp_exact(char *source, int index)
{
return 1;
}
static void mysqlna_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
// Return the candidate password stored by mysqlna_set_key().
static char *get_key(int index)
{
return saved_key[index];
}
// Format descriptor wiring the functions above into John's plugin API.
struct fmt_main fmt_mysqlna = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
mysqlna_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
// binary_hash_* use the defaults; they mirror get_hash_* below
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
mysqlna_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
PVMappingFilterTimeWeek.h | /* * MIT License
*
* © ESI Group, 2015
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
*
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
*
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef PVFILTER_PVMAPPINGFILTERTIMEWEEK_H
#define PVFILTER_PVMAPPINGFILTERTIMEWEEK_H
#include <inendi/PVMappingFilter.h>
#include <pvkernel/rush/PVNraw.h>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <unicode/calendar.h>
#include <pvcop/types/datetime.h>
#include <omp.h>
using namespace icu_67;
namespace Inendi
{
// Maps a time column to the second-within-week (0 .. 7*24*3600-1), so
// values recurring at the same weekday/time collapse onto the same point.
class PVMappingFilterTimeWeek : public PVMappingFilter
{
	friend class time_mapping;

  public:
	PVMappingFilterTimeWeek();

  public:
	// Compute the mapping for column `col`; dispatches on the column's
	// datetime formatter ("datetime" = seconds, "datetime_us", "datetime_ms").
	pvcop::db::array operator()(PVCol const col, PVRush::PVNraw const& nraw) override
	{
		auto f = nraw.column(col).formatter();
		const pvcop::db::array& array = nraw.column(col);
		pvcop::db::array dest("number_uint32", array.size());
		auto& dest_array = dest.to_core_array<uint32_t>();
		if (std::string(f->name()) == "datetime") {
			// epoch seconds: decompose with gmtime_r; tm_wday is 0-based (Sun=0)
			auto& core_array = array.to_core_array<uint32_t>();
#pragma omp parallel for
			for (size_t row = 0; row < array.size(); row++) {
				tm local_tm;
				const time_t t = static_cast<int64_t>(core_array[row]);
				pvcop::types::formatter_datetime::gmtime_r(&t, &local_tm);
				dest_array[row] =
				    local_tm.tm_sec +
				    60 * (local_tm.tm_min + 60 * (local_tm.tm_hour + 24 * local_tm.tm_wday));
			}
		} else if (std::string(f->name()) == "datetime_us") {
			// microsecond resolution: boost ptime; day_of_week() is 0-based
			auto& core_array = array.to_core_array<boost::posix_time::ptime>();
#pragma omp parallel for
			for (size_t row = 0; row < array.size(); row++) {
				const boost::posix_time::ptime t = core_array[row];
				dest_array[row] = t.time_of_day().total_seconds() +
				                  60 * 60 * 24 * t.date().day_of_week().as_number();
			}
		} else {
			assert(std::string(f->name()) == "datetime_ms" && "Unknown datetime formatter");
			auto& core_array = array.to_core_array<uint64_t>();
			// ICU Calendar is not thread-safe: one instance per OpenMP thread.
			std::vector<std::unique_ptr<Calendar>> calendars;
			for (size_t i = 0; i < (size_t)omp_get_max_threads(); i++) {
				UErrorCode err = U_ZERO_ERROR;
				calendars.emplace_back(Calendar::createInstance(err));
				if (not U_SUCCESS(err)) {
					throw std::runtime_error("Can't create calendar to compute mapping");
				}
			}
#pragma omp parallel
			{
				std::unique_ptr<Calendar>& cal = calendars[omp_get_thread_num()];
				UErrorCode err = U_ZERO_ERROR;
#pragma omp for
				for (size_t row = 0; row < array.size(); row++) {
					cal->setTime(core_array[row], err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t sec = cal->get(UCAL_SECOND, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t min = cal->get(UCAL_MINUTE, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t hour = cal->get(UCAL_HOUR_OF_DAY, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					int32_t wday = cal->get(UCAL_DAY_OF_WEEK, err);
					if (not U_SUCCESS(err)) {
						continue;
					}
					// NOTE(review): UCAL_DAY_OF_WEEK is 1-based (Sunday=1),
					// unlike tm_wday/day_of_week above, so this branch can
					// exceed the 7*24*3600-1 maximum declared in get_minmax()
					// and is shifted by one day -- verify whether wday-1 was
					// intended here.
					dest_array[row] = sec + 60 * (min + 60 * (hour + 24 * wday));
				}
			}
		}
		return dest;
	}

	std::unordered_set<std::string> list_usable_type() const override { return {"time"}; }

	QString get_human_name() const override { return QString("Week"); }

	// Mapping range: [0, one week in seconds).
	pvcop::db::array get_minmax(pvcop::db::array const&, pvcop::db::selection const&) const override
	{
		pvcop::db::array res("number_uint32", 2);
		auto res_array = res.to_core_array<uint32_t>();
		res_array[0] = 0;
		res_array[1] = 7 * 24 * 3600 - 1;
		return res;
	}

	CLASS_FILTER_NOPARAM(PVMappingFilterTimeWeek)
};
}
#endif
|
batch_gen.c | /* Copyright 2021. Uecker Lab. University Medical Center Göttingen.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors: Moritz Blumenthal
*/
#include <assert.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "nlops/nlop.h"
#include "misc/misc.h"
#include "misc/types.h"
#include "misc/debug.h"
#include "iter/iter6.h"
#include "batch_gen.h"
/*
 * Fill perm with N indices drawn uniformly at random (with replacement) from [0, N).
 * rand_seed: per-generator state for rand_r; Nb is unused (kept so all samplers share a signature).
 * NOTE(review): the critical section presumably guards rand_seed against concurrent
 * callers from an enclosing parallel region — confirm; the loop itself is serial.
 */
static void rand_draw_data(unsigned int* rand_seed, long N, long perm[N], long Nb)
{
UNUSED(Nb);
for (int i = 0; i < N; i++) {
#pragma omp critical
perm[i] = rand_r(rand_seed) % N;
}
}
/*
 * Fill perm with a random permutation of [0, N).
 * Strategy: at step i draw a rank in the N-i not-yet-drawn values, then shift
 * the rank past every already-drawn index (ascending scan) to map it to an
 * absolute index. The ascending order of the j-loop is essential.
 * Nb is unused (uniform signature with the other samplers).
 */
static void rand_perm_data(unsigned int* rand_seed, long N, long perm[N], long Nb)
{
UNUSED(Nb);
bool drawn[N];
for (int i = 0; i < N; i++)
drawn[i] = false;
for (int i = 0; i < N; i++) {
#pragma omp critical
perm[i] = rand_r(rand_seed) % (N - i);
for (int j = 0; j < N; j++)
if (drawn[j] && perm[i] >= j)
perm[i] ++;
drawn[perm[i]] = true;
}
}
/*
 * Permute whole batches: the N/Nb contiguous blocks of size Nb are shuffled,
 * while the order inside each batch is preserved. A trailing remainder
 * (indices >= (N/Nb)*Nb) is left in place.
 */
static void rand_perm_batches(unsigned int* rand_seed, long N, long perm[N], long Nb)
{
long perm_batch[N / Nb];
rand_perm_data(rand_seed, N / Nb, perm_batch, 0);
for (int i = 0; i < N / Nb; i++)
for (int j = 0; j < Nb; j++)
perm[Nb * i + j] = perm_batch[i] * Nb + j;
for (int i = (N / Nb) * Nb; i < N; i++)
perm[i] = i;
}
/* State of the batch generator nlop. */
struct batch_gen_data_s {
INTERFACE(nlop_data_t);
long D; //number of arrays
long N; //max number of dimensions over all arrays
const long** dims; //per-array slice dims (batch dim set to 1)
const long** ostrs; //per-array output strides
const long** istrs; //per-array strides into the full dataset
const int* bat_idx; //per-array index of the batch dimension (-1: none)
long Nb; //batch size
long Nt; //total number of datasets
const complex float** data; //per-array pointer to the full dataset
long start; //offset in perm of the next batch
long* perm; //current ordering of the Nt datasets
enum BATCH_GEN_TYPE type; //how a new epoch ordering is composed
unsigned int rand_seed; //PRNG state for rand_r
};
DEF_TYPEID(batch_gen_data_s);
/*
 * (Re)draw the dataset ordering when the next batch would overrun the epoch;
 * no-op while a full batch still fits into the current permutation.
 */
static void get_indices(struct batch_gen_data_s* data)
{
if (data->start + data->Nb <= data->Nt)
return;
switch (data->type) {
case BATCH_GEN_SAME:
for (long i = 0; i < data->Nt; i++)
data->perm[i] = i; //identity: batches always contain the same data
break;
case BATCH_GEN_SHUFFLE_BATCHES:
rand_perm_batches(&(data->rand_seed), data->Nt, data->perm, data->Nb);
break;
case BATCH_GEN_SHUFFLE_DATA:
rand_perm_data(&(data->rand_seed), data->Nt, data->perm, data->Nb);
break;
case BATCH_GEN_RANDOM_DATA:
rand_draw_data(&(data->rand_seed), data->Nt, data->perm, data->Nb);
break;
default:
assert(0);
}
data->start = 0; //restart at the beginning of the new ordering
}
/*
 * Apply the batch generator: fill each output arg with the next batch.
 * Arrays without a batch dimension (bat_idx == -1) are copied whole;
 * otherwise Nb slices selected via perm are gathered one at a time.
 */
static void batch_gen_fun(const struct nlop_data_s* _data, int N_args, complex float* args[N_args])
{
const auto data = CAST_DOWN(batch_gen_data_s, _data);
assert(data->D == N_args);
get_indices(data); //redraw the ordering if the epoch is exhausted
int N = data->N;
for (long j = 0; j < data->D; j++) {
if (-1 == data->bat_idx[j]) {
md_copy(N, data->dims[j], args[j], data->data[j], CFL_SIZE);
continue;
}
long ipos[N];
long opos[N];
for (int i = 0; i < N; i++) {
ipos[i] = 0;
opos[i] = 0;
}
for (int i = 0; i < data->Nb; i++) {
ipos[data->bat_idx[j]] = data->perm[(data->start + i)]; //source slice in the dataset
opos[data->bat_idx[j]] = i; //destination slot within the batch
md_copy2( N, data->dims[j],
data->ostrs[j], &MD_ACCESS(N, data->ostrs[j], opos, args[j]),
data->istrs[j], &MD_ACCESS(N, data->istrs[j], ipos, data->data[j]),
CFL_SIZE);
}
}
data->start += data->Nb; //advance within the current epoch
}
/* Free all per-array descriptors, then the batch-generator state itself. */
static void batch_gen_del(const nlop_data_t* _data)
{
    const auto data = CAST_DOWN(batch_gen_data_s, _data);
    long num_arrays = data->D;

    /* inner descriptors first */
    for (long idx = 0; idx < num_arrays; idx++) {

        xfree(data->dims[idx]);
        xfree(data->ostrs[idx]);
        xfree(data->istrs[idx]);
    }

    /* then the per-array tables */
    xfree(data->dims);
    xfree(data->ostrs);
    xfree(data->istrs);
    xfree(data->bat_idx);
    xfree(data->data);
    xfree(data->perm);

    xfree(data);
}
/**
 * Create an operator copying Nb random (not necessarily distinct) datasets to the output
 *
 * @param D number of tensors
 * @param Ns number of dimensions for each tensor
 * @param bat_dims output_dims
 * @param tot_dims total dims of dataset (batch size Nb and total count Nt are inferred from the dimension where bat_dims and tot_dims differ)
 * @param data pointers to data
 * @param Nc number of calls (initializes the nlop as it had been applied Nc times) -> reproducible warm start
 * @param type method to compose new batches
 * @param seed seed for random reshuffling of batches
 */
const struct nlop_s* batch_gen_create(int D, const int Ns[D], const long* bat_dims[D], const long* tot_dims[D], const _Complex float* data[D], long Nc, enum BATCH_GEN_TYPE type, unsigned int seed)
{
PTR_ALLOC(struct batch_gen_data_s, d);
SET_TYPEID(batch_gen_data_s, d);
d->D = D;
d->Nb = 1;
d->Nt = 1;
int N = 0;
int bat_idx[D];
//locate the (single) batch dimension of each tensor and infer Nb/Nt
for (int j = 0; j < D; j++) {
bat_idx[j] = -1;
for (int i = 0; i < Ns[j]; i++) {
if (bat_dims[j][i] != tot_dims[j][i]) {
assert(-1 == bat_idx[j]); //at most one batch dimension per tensor
bat_idx[j] = i;
assert((d->Nt == tot_dims[j][i]) || (1 == d->Nt) || (1 == tot_dims[j][i]));
d->Nt = MAX(d->Nt, tot_dims[j][i]);
assert((d->Nb == bat_dims[j][i]) || (1 == d->Nb));
d->Nb = MAX(d->Nb, bat_dims[j][i]);
}
}
N = MAX(N, Ns[j]);
}
long nl_odims[D][N];
for(long i = 0; i < D; i++)
md_singleton_dims(N, nl_odims[i]);
PTR_ALLOC(const long*[D], sdims);
PTR_ALLOC(const long*[D], ostrs);
PTR_ALLOC(const long*[D], istrs);
PTR_ALLOC(int[D], n_bat_idx);
PTR_ALLOC(const complex float*[D], ndata);
//build per-tensor slice dims and strides (slice = one dataset, batch dim set to 1)
for (long j = 0; j < D; j++) {
md_copy_dims(Ns[j], nl_odims[j], bat_dims[j]);
PTR_ALLOC(long [N], slice_dims);
md_singleton_dims(N, *slice_dims);
md_copy_dims(Ns[j], *slice_dims, bat_dims[j]);
if (-1 != bat_idx[j])
(*slice_dims)[bat_idx[j]] = 1;
PTR_ALLOC(long [N], ostr);
PTR_ALLOC(long [N], istr);
md_calc_strides(N, *ostr, nl_odims[j], CFL_SIZE);
md_singleton_strides(N, *istr);
md_calc_strides(Ns[j], *istr, tot_dims[j], CFL_SIZE);
(*sdims)[j] = *PTR_PASS(slice_dims);
(*ostrs)[j] = *PTR_PASS(ostr);
(*istrs)[j] = *PTR_PASS(istr);
(*ndata)[j] = data[j];
(*n_bat_idx)[j] = bat_idx[j];
}
d->N = N;
d->data = *PTR_PASS(ndata);
d->dims = *PTR_PASS(sdims);
d->ostrs = *PTR_PASS(ostrs);
d->istrs = *PTR_PASS(istrs);
d->bat_idx = *PTR_PASS(n_bat_idx);
d->rand_seed = seed;
d->type = type;
PTR_ALLOC(long[d->Nt], perm);
d->perm = *PTR_PASS(perm);
d->start = d->Nt + 1; //enforce drawing a new permutation
get_indices(d);
for (int i = 0; i < Nc; i++) { //initialize the state as if the generator had already run Nc times (reproducible warm start)
get_indices(d);
d->start = (d->start + d->Nb);
}
assert(d->Nb <= d->Nt);
const struct nlop_s* result = nlop_generic_create(D, N, nl_odims, 0, 0, NULL, CAST_UP(PTR_PASS(d)), batch_gen_fun, NULL, NULL, NULL, NULL, batch_gen_del);
//restore each output to its caller-visible rank
for (int i = 0; i < D; i ++)
result = nlop_reshape_out_F(result, i, Ns[i], nl_odims[i]);
return result;
}
/* Convenience wrapper: take batch composition type and seed from an iter6 configuration. */
const struct nlop_s* batch_gen_create_from_iter(struct iter6_conf_s* iter_conf, int D, const int Ns[D], const long* bat_dims[D], const long* tot_dims[D], const _Complex float* data[D], long Nc)
{
return batch_gen_create(D, Ns, bat_dims, tot_dims, data, Nc, iter_conf->batchgen_type, iter_conf->batch_seed);
}
|
diagmv_x_coo_u.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * y := beta*y + alpha*x for the unit-diagonal case of a COO matrix:
 * the diagonal entries are implicitly 1, so no matrix values are read —
 * A supplies only the row count. Parallelized over rows.
 */
static alphasparse_status_t
diagmv_c_coo_u_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_COO *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], beta, y[i]); //y[i] = beta * y[i]
alpha_madde(y[i], alpha, x[i]); //y[i] += alpha * x[i]
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (name generated by the ONAME macro): dispatch to the OpenMP kernel. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_COO *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return diagmv_c_coo_u_omp(alpha, A, x, beta, y);
}
|
convolution_pack1to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// int8 convolution, input packed 1 element per channel -> output packed 4 (SSE2).
// Accumulates 4 output channels at once into a 32-bit vector per output pixel.
static void convolution_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets: flat pixel offsets of each kernel tap relative to the window origin
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w; // jump to next kernel row
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128i _sum0 = _mm_setzero_si128();
const signed char* kptr = weight_data_int8.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
// broadcast one input sample to all 8 int16 lanes
__m128i _val = _mm_set1_epi16((short)sptr[space_ofs[k]]);
// TODO use _mm_cvtepi8_epi16 on sse4.1
// sign-extend 4 int8 weights to int16 (cmpgt produces the sign mask)
__m128i _w = _mm_loadl_epi64((const __m128i*)kptr);
_w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));
// 16x16 -> 32-bit products via lo/hi halves, then accumulate
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
__m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);
_sum0 = _mm_add_epi32(_sum0, _s0);
kptr += 4;
}
}
_mm_storeu_si128((__m128i*)(outptr + j * 4), _sum0);
}
outptr += outw * 4;
}
}
}
|
matrix_vector_functions_intel_mkl.c | /* high level matrix/vector functions using Intel MKL for blas */
#include "matrix_vector_functions_intel_mkl.h"
#include "mkl_scalapack.h"
/* initialize new matrix and set all entries to zero.
 * Returns NULL if allocation fails (previously the failure was unchecked). */
mat * matrix_new(int nrows, int ncols)
{
    mat *M = malloc(sizeof *M);   /* no cast needed in C; sizeof *M tracks the type */
    if(!M){
        return NULL;
    }
    //M->d = (double*)mkl_calloc(nrows*ncols, sizeof(double), 64);
    M->d = calloc((size_t)nrows*(size_t)ncols, sizeof(double)); /* zero-initialized */
    if(!M->d){
        free(M);
        return NULL;
    }
    M->nrows = nrows;
    M->ncols = ncols;
    return M;
}
/* initialize new vector and set all entries to zero.
 * Returns NULL if allocation fails (previously the failure was unchecked). */
vec * vector_new(int nrows)
{
    vec *v = malloc(sizeof *v);
    if(!v){
        return NULL;
    }
    //v->d = (double*)mkl_calloc(nrows,sizeof(double), 64);
    v->d = calloc((size_t)nrows, sizeof(double)); /* zero-initialized */
    if(!v->d){
        free(v);
        return NULL;
    }
    v->nrows = nrows;
    return v;
}
/* free a matrix created by matrix_new; tolerates NULL like free() */
void matrix_delete(mat *M)
{
    if(!M){
        return;
    }
    //mkl_free(M->d);
    free(M->d);
    free(M);
}
/* free a vector created by vector_new; tolerates NULL like free() */
void vector_delete(vec *v)
{
    if(!v){
        return;
    }
    //mkl_free(v->d);
    free(v->d);
    free(v);
}
// column major format
// M(row_num, col_num) = val; storage is column-major: element (r,c) lives at d[c*nrows + r]
void matrix_set_element(mat *M, int row_num, int col_num, double val){
//M->d[row_num*(M->ncols) + col_num] = val;
M->d[col_num*(M->nrows) + row_num] = val;
}
// return M(row_num, col_num); column-major storage
double matrix_get_element(mat *M, int row_num, int col_num){
//return M->d[row_num*(M->ncols) + col_num];
return M->d[col_num*(M->nrows) + row_num];
}
// v(row_num) = val
void vector_set_element(vec *v, int row_num, double val){
v->d[row_num] = val;
}
// return v(row_num)
double vector_get_element(vec *v, int row_num){
return v->d[row_num];
}
/* load matrix from binary file
 * the nonzeros are in order of double loop over rows and columns
format:
num_rows (int)
num_columns (int)
nnz (double)
...
nnz (double)
 * Returns NULL on open/read failure (previously fopen/fread were unchecked
 * and the file was opened in text mode, which corrupts binary data on Windows).
*/
mat * matrix_load_from_binary_file(char *fname){
    int i, j, num_rows, num_columns;
    double nnz_val;
    size_t one = 1;
    FILE *fp;
    mat *M;
    fp = fopen(fname,"rb");                       /* binary mode */
    if(!fp){
        printf("could not open %s for reading\n", fname);
        return NULL;
    }
    if(fread(&num_rows,sizeof(int),one,fp) != one ||      /* read m */
       fread(&num_columns,sizeof(int),one,fp) != one){    /* read n */
        printf("failed to read header of %s\n", fname);
        fclose(fp);
        return NULL;
    }
    printf("initializing M of size %d by %d\n", num_rows, num_columns);
    M = matrix_new(num_rows,num_columns);
    printf("done..\n");
    // read and set elements
    for(i=0; i<num_rows; i++){
        for(j=0; j<num_columns; j++){
            if(fread(&nnz_val,sizeof(double),one,fp) != one){   /* read nnz */
                printf("unexpected end of file in %s\n", fname);
                fclose(fp);
                matrix_delete(M);
                return NULL;
            }
            matrix_set_element(M,i,j,nnz_val);
        }
    }
    fclose(fp);
    return M;
}
/* write matrix to binary file
 * the nonzeros are in order of double loop over rows and columns
format:
num_rows (int)
num_columns (int)
nnz (double)
...
nnz (double)
 * Opens in binary mode and checks fopen (previously text mode, unchecked).
*/
void matrix_write_to_binary_file(mat *M, char *fname){
    int i, j, num_rows, num_columns;
    double nnz_val;
    size_t one = 1;
    FILE *fp;
    num_rows = M->nrows; num_columns = M->ncols;
    fp = fopen(fname,"wb");                       /* binary mode */
    if(!fp){
        printf("could not open %s for writing\n", fname);
        return;
    }
    fwrite(&num_rows,sizeof(int),one,fp); //write m
    fwrite(&num_columns,sizeof(int),one,fp); //write n
    // write the elements
    for(i=0; i<num_rows; i++){
        for(j=0; j<num_columns; j++){
            nnz_val = matrix_get_element(M,i,j);
            fwrite(&nnz_val,sizeof(double),one,fp); //write nnz
        }
    }
    fclose(fp);
}
/* print matrix to stdout, one row per line */
void matrix_print(mat * M){
int i,j;
double val;
for(i=0; i<M->nrows; i++){
for(j=0; j<M->ncols; j++){
val = matrix_get_element(M, i, j);
printf("%f ", val);
}
printf("\n");
}
}
/* print vector to stdout, one entry per line */
void vector_print(vec * v){
int i;
double val;
for(i=0; i<v->nrows; i++){
val = vector_get_element(v, i);
printf("%f\n", val);
}
}
/* v(:) = data — overwrite every entry of v from the raw array data
   (data must hold at least v->nrows doubles). */
void vector_set_data(vec *v, double *data){
    int k;
    const int len = v->nrows;
    #pragma omp parallel for shared(v,data)
    for(k=0; k<len; k++){
        v->d[k] = data[k];
    }
}
/* scale vector by a constant: v(:) *= scalar */
void vector_scale(vec *v, double scalar){
    int k;
    const int len = v->nrows;
    #pragma omp parallel for shared(v)
    for(k=0; k<len; k++){
        v->d[k] *= scalar;
    }
}
/* scale matrix by a constant: M(:) *= scalar (flat pass over the storage) */
void matrix_scale(mat *M, double scalar){
    int k;
    const int total = (M->nrows)*(M->ncols);
    #pragma omp parallel for shared(M)
    for(k=0; k<total; k++){
        M->d[k] *= scalar;
    }
}
/* copy contents of vec s to d (d must already have s->nrows entries) */
void vector_copy(vec *d, vec *s){
    int k;
    const int len = s->nrows;
    #pragma omp parallel for shared(d,s)
    for(k=0; k<len; k++){
        d->d[k] = s->d[k];
    }
}
/* copy contents of mat S to D (same shape; flat pass over the storage) */
void matrix_copy(mat *D, mat *S){
    int k;
    const int total = (S->nrows)*(S->ncols);
    #pragma omp parallel for shared(D,S)
    for(k=0; k<total; k++){
        D->d[k] = S->d[k];
    }
}
/* hard threshold matrix entries: zero out every entry with |entry| < TOL */
void matrix_hard_threshold(mat *M, double TOL){
    int k;
    const int total = (M->nrows)*(M->ncols);
    #pragma omp parallel for shared(M)
    for(k=0; k<total; k++){
        if(fabs(M->d[k]) < TOL)
            M->d[k] = 0;
    }
}
/* build transpose of matrix : Mt = M^T (Mt pre-allocated as ncols x nrows) */
void matrix_build_transpose(mat *Mt, mat *M){
    int r, c;
    for(c=0; c<(M->ncols); c++){
        for(r=0; r<(M->nrows); r++){
            matrix_set_element(Mt, c, r, matrix_get_element(M, r, c));
        }
    }
}
/* debug variant of matrix_build_transpose: prints progress per row when mark==3 */
void matrix_build_transpose_debug(mat *Mt, mat *M, int mark){
int i,j;
printf("enter\n");
for(i=0; i<(M->nrows); i++){
if (mark==3) printf("i=%d\n", i);
for(j=0; j<(M->ncols); j++){
// if (mark==3) printf("i=%d, j=%d\n", i, j);
matrix_set_element(Mt,j,i,matrix_get_element(M,i,j));
}
}
}
/* subtract b from a and save result in a: a(:) -= b(:) */
void vector_sub(vec *a, vec *b){
    int k;
    const int len = a->nrows;
    #pragma omp parallel for shared(a,b)
    for(k=0; k<len; k++){
        a->d[k] -= b->d[k];
    }
}
/* subtract B from A and save result in A: A(:) -= B(:) (flat pass) */
void matrix_sub(mat *A, mat *B){
    int k;
    const int total = (A->nrows)*(A->ncols);
    #pragma omp parallel for shared(A,B)
    for(k=0; k<total; k++){
        A->d[k] -= B->d[k];
    }
}
/* A = A - u*v where u is a column vec and v is a row vec (rank-1 update);
   parallel over rows, j explicitly private per thread */
void matrix_sub_column_times_row_vector(mat *A, vec *u, vec *v){
int i,j;
#pragma omp parallel for shared(A,u,v) private(j)
for(i=0; i<(A->nrows); i++){
for(j=0; j<(A->ncols); j++){
matrix_set_element(A,i,j,matrix_get_element(A,i,j) - vector_get_element(u,i)*vector_get_element(v,j));
}
}
}
/* compute euclidean norm of vector; sum of squares via OpenMP reduction */
double vector_get2norm(vec *v){
int i;
double val, normval = 0;
#pragma omp parallel shared(v,normval) private(i,val)
{
#pragma omp for reduction(+:normval)
for(i=0; i<(v->nrows); i++){
val = v->d[i];
normval += val*val;
}
}
return sqrt(normval);
}
/* returns the dot product of two vectors (OpenMP reduction over u->nrows entries) */
double vector_dot_product(vec *u, vec *v){
int i;
double dotval = 0;
#pragma omp parallel shared(u,v,dotval) private(i)
{
#pragma omp for reduction(+:dotval)
for(i=0; i<u->nrows; i++){
dotval += (u->d[i])*(v->d[i]);
}
}
return dotval;
}
/* matrix frobenius norm: sqrt of the sum of squared entries (flat reduction) */
double get_matrix_frobenius_norm(mat *M){
int i;
double val, normval = 0;
#pragma omp parallel shared(M,normval) private(i,val)
{
#pragma omp for reduction(+:normval)
for(i=0; i<((M->nrows)*(M->ncols)); i++){
val = M->d[i];
normval += val*val;
}
}
return sqrt(normval);
}
/* matrix max abs val: returns max_i |M->d[i]| (always non-negative).
 * BUGFIX: the original stored the signed value (max = val) after comparing
 * magnitudes, so a negative entry of largest magnitude gave a wrong result. */
double get_matrix_max_abs_element(mat *M){
    int i;
    double val, max = 0;
    for(i=0; i<((M->nrows)*(M->ncols)); i++){
        val = fabs(M->d[i]);      /* compare and store the magnitude */
        if( val > max )
            max = val;
    }
    return max;
}
/* calculate percent error between A and B
in terms of Frobenius norm: 100*norm(A - B)/norm(A)
 * (the unused norm(B) computation of the original was removed) */
double get_percent_error_between_two_mats(mat *A, mat *B){
    int m,n;
    double normA, normA_minus_B;
    mat *A_minus_B;
    m = A->nrows;
    n = A->ncols;
    A_minus_B = matrix_new(m,n);
    matrix_copy(A_minus_B, A);
    matrix_sub(A_minus_B, B);        /* A_minus_B = A - B */
    normA = get_matrix_frobenius_norm(A);
    normA_minus_B = get_matrix_frobenius_norm(A_minus_B);
    matrix_delete(A_minus_B);
    return 100.0*normA_minus_B/normA;
}
/* return the squared 2-norm of column colnum of M */
double get_matrix_column_norm_squared(mat *M, int colnum){
    int i;
    double colnorm = 0;
    for(i=0; i<(M->nrows); i++){
        double entry = matrix_get_element(M, i, colnum);
        colnorm += entry*entry;
    }
    return colnorm;
}
/* return the largest column 2-norm of M.
 * BUGFIX: the original shared one col_vec scratch vector across all threads of
 * the parallel for — a data race; each thread now owns its own scratch. */
double matrix_getmaxcolnorm(mat *M){
    int n = M->ncols;
    double maxnorm = 0;
    #pragma omp parallel
    {
        vec *col_vec = vector_new(M->nrows);   /* per-thread scratch */
        #pragma omp for
        for(int i=0; i<n; i++){
            matrix_get_col(M,i,col_vec);
            double vecnorm = vector_get2norm(col_vec);
            #pragma omp critical
            {
                if(vecnorm > maxnorm){
                    maxnorm = vecnorm;
                }
            }
        }
        vector_delete(col_vec);
    }
    return maxnorm;
}
/* column_norms(j) = squared 2-norm of column j of M.
 * FIX: inner pragma was "parallel for", which nested a new region inside the
 * outer parallel so every thread ran the whole loop; "for" shares the work. */
void compute_matrix_column_norms(mat *M, vec *column_norms){
    int j;
    #pragma omp parallel shared(column_norms,M) private(j)
    {
        #pragma omp for
        for(j=0; j<(M->ncols); j++){
            vector_set_element(column_norms,j, get_matrix_column_norm_squared(M,j));
        }
    }
}
/* initialize a random matrix with N(0,1) entries drawn via MKL VSL.
 * FIX: inner pragma was "parallel for" nested in the outer parallel region,
 * so every thread copied the whole buffer; "for" distributes the copy. */
void initialize_random_matrix(mat *M){
    int i,m,n;
    double val;
    m = M->nrows;
    n = M->ncols;
    float a=0.0,sigma=1.0;
    int N = m*n;
    float *r;
    VSLStreamStatePtr stream;
    r = (float*)malloc(N*sizeof(float));
    vslNewStream( &stream, BRNG, time(NULL) );   /* time-seeded: non-reproducible runs */
    //vslNewStream( &stream, BRNG, SEED );
    vsRngGaussian( METHOD, stream, N, r, a, sigma );
    // copy the float samples into the double storage
    #pragma omp parallel shared(M,N,r) private(i,val)
    {
        #pragma omp for
        for(i=0; i<N; i++){
            val = r[i];
            M->d[i] = val;
        }
    }
    free(r);
}
/* initialize diagonal matrix from vector data: D(i,i) = data(i).
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * run the full loop; "for" shares the iterations. */
void initialize_diagonal_matrix(mat *D, vec *data){
    int i;
    #pragma omp parallel shared(D,data) private(i)
    {
        #pragma omp for
        for(i=0; i<(D->nrows); i++){
            matrix_set_element(D,i,i,data->d[i]);
        }
    }
}
/* initialize identity: zero D, then set the diagonal to 1.
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * run the full loop; "for" shares the iterations. */
void initialize_identity_matrix(mat *D){
    int i;
    matrix_scale(D, 0);
    #pragma omp parallel shared(D) private(i)
    {
        #pragma omp for
        for(i=0; i<(D->nrows); i++){
            matrix_set_element(D,i,i,1.0);
        }
    }
}
/* invert diagonal matrix: Dinv(i,i) = 1/D(i,i) (off-diagonals untouched).
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * run the full loop; "for" shares the iterations. */
void invert_diagonal_matrix(mat *Dinv, mat *D){
    int i;
    #pragma omp parallel shared(D,Dinv) private(i)
    {
        #pragma omp for
        for(i=0; i<(D->nrows); i++){
            matrix_set_element(Dinv,i,i,1.0/(matrix_get_element(D,i,i)));
        }
    }
}
/* C = A*B ; column major */
/* thin wrapper over cblas_dgemm; C must be pre-allocated (A->nrows x B->ncols) */
void matrix_matrix_mult(mat *A, mat *B, mat *C){
double alpha, beta;
alpha = 1.0; beta = 0.0;
//cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows);
}
/* C = A^T*B ; column major */
/* C must be pre-allocated (A->ncols x B->ncols) */
void matrix_transpose_matrix_mult(mat *A, mat *B, mat *C){
double alpha, beta;
alpha = 1.0; beta = 0.0;
//cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows);
}
/* C = A*B^T ; column major */
/* C must be pre-allocated (A->nrows x B->nrows) */
void matrix_matrix_transpose_mult(mat *A, mat *B, mat *C){
double alpha, beta;
alpha = 1.0; beta = 0.0;
//cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, A->nrows, B->nrows, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, A->nrows, B->nrows, A->ncols, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows);
}
/* y = M*x ; column major */
/* y must be pre-allocated with M->nrows entries */
void matrix_vector_mult(mat *M, vec *x, vec *y){
double alpha, beta;
alpha = 1.0; beta = 0.0;
cblas_dgemv (CblasColMajor, CblasNoTrans, M->nrows, M->ncols, alpha, M->d, M->nrows, x->d, 1, beta, y->d, 1);
}
/* y = M^T*x ; column major */
/* y must be pre-allocated with M->ncols entries */
void matrix_transpose_vector_mult(mat *M, vec *x, vec *y){
double alpha, beta;
alpha = 1.0; beta = 0.0;
cblas_dgemv (CblasColMajor, CblasTrans, M->nrows, M->ncols, alpha, M->d, M->nrows, x->d, 1, beta, y->d, 1);
}
/* set column of matrix to vector: M(:,j) = column_vec */
void matrix_set_col(mat *M, int j, vec *column_vec){
int i;
#pragma omp parallel shared(column_vec,M,j) private(i)
{
#pragma omp for
for(i=0; i<M->nrows; i++){
matrix_set_element(M,i,j,vector_get_element(column_vec,i));
}
}
}
/* extract column of a matrix into a vector: column_vec = M(:,j).
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * run the full loop; "for" shares the iterations (as matrix_set_col does). */
void matrix_get_col(mat *M, int j, vec *column_vec){
    int i;
    #pragma omp parallel shared(column_vec,M,j) private(i)
    {
        #pragma omp for
        for(i=0; i<M->nrows; i++){
            vector_set_element(column_vec,i,matrix_get_element(M,i,j));
        }
    }
}
/* extract row i of a matrix into a vector: row_vec = M(i,:).
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * run the full loop; "for" shares the iterations. */
void matrix_get_row(mat *M, int i, vec *row_vec){
    int j;
    #pragma omp parallel shared(row_vec,M,i) private(j)
    {
        #pragma omp for
        for(j=0; j<M->ncols; j++){
            vector_set_element(row_vec,j,matrix_get_element(M,i,j));
        }
    }
}
/* put vector row_vec as row i of a matrix: M(i,:) = row_vec.
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * run the full loop; "for" shares the iterations. */
void matrix_set_row(mat *M, int i, vec *row_vec){
    int j;
    #pragma omp parallel shared(row_vec,M,i) private(j)
    {
        #pragma omp for
        for(j=0; j<M->ncols; j++){
            matrix_set_element(M,i,j,vector_get_element(row_vec,j));
        }
    }
}
/* Mc = M(:,inds) */
/*void matrix_get_selected_columns(mat *M, int *inds, mat *Mc){
int i;
vec *col_vec = vector_new(M->nrows);
for(i=0; i<(Mc->ncols); i++){
matrix_get_col(M,inds[i],col_vec);
matrix_set_col(Mc,i,col_vec);
}
vector_delete(col_vec);
}*/
/* Mc = M(:,inds) — gather the columns of M listed in inds into Mc.
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * process all columns (redundant alloc/copy/free); "for" shares the work. */
void matrix_get_selected_columns(mat *M, int *inds, mat *Mc){
    int i;
    vec *col_vec;
    #pragma omp parallel shared(M,Mc,inds) private(i,col_vec)
    {
        #pragma omp for
        for(i=0; i<(Mc->ncols); i++){
            col_vec = vector_new(M->nrows);   /* per-iteration scratch, private per thread */
            matrix_get_col(M,inds[i],col_vec);
            matrix_set_col(Mc,i,col_vec);
            vector_delete(col_vec);
        }
    }
}
/* M(:,inds) = Mc */
/*void matrix_set_selected_columns(mat *M, int *inds, mat *Mc){
int i;
vec *col_vec = vector_new(M->nrows);
for(i=0; i<(Mc->ncols); i++){
matrix_get_col(Mc,i,col_vec);
matrix_set_col(M,inds[i],col_vec);
}
vector_delete(col_vec);
}*/
/* M(:,inds) = Mc — scatter the columns of Mc into the listed columns of M.
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * process all columns; "for" shares the work. */
void matrix_set_selected_columns(mat *M, int *inds, mat *Mc){
    int i;
    vec *col_vec;
    #pragma omp parallel shared(M,Mc,inds) private(i,col_vec)
    {
        #pragma omp for
        for(i=0; i<(Mc->ncols); i++){
            col_vec = vector_new(M->nrows);
            matrix_get_col(Mc,i,col_vec);
            matrix_set_col(M,inds[i],col_vec);
            vector_delete(col_vec);
        }
    }
}
/* Mr = M(inds,:) */
/*void matrix_get_selected_rows(mat *M, int *inds, mat *Mr){
int i;
vec *row_vec = vector_new(M->ncols);
for(i=0; i<(Mr->nrows); i++){
matrix_get_row(M,inds[i],row_vec);
matrix_set_row(Mr,i,row_vec);
}
vector_delete(row_vec);
}*/
/* Mr = M(inds,:) — gather the rows of M listed in inds into Mr.
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * process all rows; "for" shares the work. */
void matrix_get_selected_rows(mat *M, int *inds, mat *Mr){
    int i;
    vec *row_vec;
    #pragma omp parallel shared(M,Mr,inds) private(i,row_vec)
    {
        #pragma omp for
        for(i=0; i<(Mr->nrows); i++){
            row_vec = vector_new(M->ncols);
            matrix_get_row(M,inds[i],row_vec);
            matrix_set_row(Mr,i,row_vec);
            vector_delete(row_vec);
        }
    }
}
/* M(inds,:) = Mr */
/*void matrix_set_selected_rows(mat *M, int *inds, mat *Mr){
int i;
vec *row_vec = vector_new(M->ncols);
for(i=0; i<(Mr->nrows); i++){
matrix_get_row(Mr,i,row_vec);
matrix_set_row(M,inds[i],row_vec);
}
vector_delete(row_vec);
}*/
/* M(inds,:) = Mr — scatter the rows of Mr into the listed rows of M.
 * FIX: inner "parallel for" nested in the parallel region made every thread
 * process all rows; "for" shares the work. */
void matrix_set_selected_rows(mat *M, int *inds, mat *Mr){
    int i;
    vec *row_vec;
    #pragma omp parallel shared(M,Mr,inds) private(i,row_vec)
    {
        #pragma omp for
        for(i=0; i<(Mr->nrows); i++){
            row_vec = vector_new(M->ncols);
            matrix_get_row(Mr,i,row_vec);
            matrix_set_row(M,inds[i],row_vec);
            vector_delete(row_vec);
        }
    }
}
/* copy only upper triangular matrix part (j >= i) of M into S,
   as for a symmetric matrix stored in its upper triangle */
void matrix_copy_symmetric(mat *S, mat *M){
    int i, j;
    const int nr = M->nrows;
    const int nc = M->ncols;
    for(i=0; i<nr; i++){
        for(j=i; j<nc; j++){     /* start at the diagonal instead of testing j>=i */
            matrix_set_element(S, i, j, matrix_get_element(M, i, j));
        }
    }
}
/* zero out the strictly lower triangular part of M (entries with j < i),
   keeping only the upper triangle */
void matrix_keep_only_upper_triangular(mat *M){
    int i, j;
    const int nr = M->nrows;
    const int nc = M->ncols;
    for(i=0; i<nr; i++){
        for(j=0; j<i && j<nc; j++){   /* strictly below the diagonal */
            matrix_set_element(M, i, j, 0);
        }
    }
}
/*
% project v in direction of u
function p=project_vec(v,u)
p = (dot(v,u)/norm(u)^2)*u;
*/
/* p = (v.u / ||u||^2) * u ; p pre-allocated with u->nrows entries */
void project_vector(vec *v, vec *u, vec *p){
double dot_product_val, vec_norm, scalar_val;
dot_product_val = vector_dot_product(v, u);
vec_norm = vector_get2norm(u);
scalar_val = dot_product_val/(vec_norm*vec_norm);
vector_copy(p, u);
vector_scale(p, scalar_val);
}
/* build orthonormal basis matrix
Q = Y;
for j=1:k
vj = Q(:,j);
for i=1:(j-1)
vi = Q(:,i);
vj = vj - project_vec(vj,vi);
end
vj = vj/norm(vj);
Q(:,j) = vj;
end
*/
/* classical Gram-Schmidt, repeated num_ortos times for numerical stability
   (re-orthogonalization); Q pre-allocated with the shape of A */
void build_orthonormal_basis_from_mat(mat *A, mat *Q){
int m,n,i,j,ind,num_ortos=2;
double vec_norm;
vec *vi,*vj,*p;
m = A->nrows;
n = A->ncols;
vi = vector_new(m);
vj = vector_new(m);
p = vector_new(m);
matrix_copy(Q, A);
for(ind=0; ind<num_ortos; ind++){
for(j=0; j<n; j++){
matrix_get_col(Q, j, vj);
for(i=0; i<j; i++){
matrix_get_col(Q, i, vi);
project_vector(vj, vi, p);
vector_sub(vj, p); //remove the component along vi
}
vec_norm = vector_get2norm(vj);
vector_scale(vj, 1.0/vec_norm); //normalize
matrix_set_col(Q, j, vj);
}
}
vector_delete(vi);
vector_delete(vj);
vector_delete(p);
}
/* output = input[inds] — gather entries of input at the positions listed in inds.
 * inds stores the indices as doubles; cast made explicit (was implicit).
 * (The unused local col_num of the original was removed.) */
void fill_vector_from_row_list(vec *input, vec *inds, vec *output){
    int i;
    for(i=0; i<(input->nrows); i++){
        vector_set_element(output,i,vector_get_element(input,(int)vector_get_element(inds,i)));
    }
}
/* copy the first k rows of M into M_out where k = M_out->nrows (M_out pre-initialized) */
void matrix_copy_first_rows(mat *M_out, mat *M){
int i,k;
k = M_out->nrows;
vec * row_vec;
for(i=0; i<k; i++){
row_vec = vector_new(M->ncols); //scratch row, re-allocated per iteration
matrix_get_row(M,i,row_vec);
matrix_set_row(M_out,i,row_vec);
vector_delete(row_vec);
}
}
/* copy the first k columns of M into M_out where k = M_out->ncols (M_out pre-initialized) */
void matrix_copy_first_columns(mat *M_out, mat *M){
int i,k;
k = M_out->ncols;
vec * col_vec;
for(i=0; i<k; i++){
col_vec = vector_new(M->nrows); //scratch column, re-allocated per iteration
matrix_get_col(M,i,col_vec);
matrix_set_col(M_out,i,col_vec);
vector_delete(col_vec);
}
}
/* D(:,1:num_columns) = S(:,1:num_columns) — copy the first num_columns columns of S into D */
void matrix_copy_first_columns_with_param(mat *D, mat *S, int num_columns){
int i,j;
for(i=0; i<(S->nrows); i++){
for(j=0; j<num_columns; j++){
matrix_set_element(D,i,j,matrix_get_element(S,i,j));
}
}
}
/* copy the first k rows and columns of M into M_out (kxk) where k = M_out->ncols
   (M_out pre-initialized); M_out = M(1:k,1:k).
   (The unused local col_vec of the original was removed.) */
void matrix_copy_first_k_rows_and_columns(mat *M_out, mat *M){
    int i,j,k;
    k = M_out->ncols;
    for(i=0; i<k; i++){
        for(j=0; j<k; j++){
            matrix_set_element(M_out,i,j,matrix_get_element(M,i,j));
        }
    }
}
/* M_out = M(:,k+1:end) — copy columns k..ncols-1 of M into M_out.
   (The unused local col_vec of the original was removed.) */
void matrix_copy_all_rows_and_last_columns_from_indexk(mat *M_out, mat *M, int k){
    int i,j,i_out,j_out;
    for(i=0; i<(M->nrows); i++){
        for(j=k; j<(M->ncols); j++){
            i_out = i; j_out = j - k;
            matrix_set_element(M_out,i_out,j_out,matrix_get_element(M,i,j));
        }
    }
}
/* M_k = M(1:k,:) — copy the first k rows of M into M_k (M_k pre-allocated) */
void fill_matrix_from_first_rows(mat *M, int k, mat *M_k){
int i;
vec *row_vec;
//#pragma omp parallel shared(M,M_k,k) private(i,row_vec)
{
//#pragma omp for
for(i=0; i<k; i++){
row_vec = vector_new(M->ncols);
matrix_get_row(M,i,row_vec);
matrix_set_row(M_k,i,row_vec);
vector_delete(row_vec);
}
}
}
/* M_k = M(:,1:k) — copy the first k columns of M into M_k (M_k pre-allocated) */
void fill_matrix_from_first_columns(mat *M, int k, mat *M_k){
int i;
vec *col_vec;
//#pragma omp parallel shared(M,M_k,k) private(i,col_vec)
{
//#pragma omp for
for(i=0; i<k; i++){
col_vec = vector_new(M->nrows);
matrix_get_col(M,i,col_vec);
matrix_set_col(M_k,i,col_vec);
vector_delete(col_vec);
}
}
}
/* M_k = M(:,k+1:end) — copy columns k..ncols-1 of M into M_k (M_k pre-allocated) */
void fill_matrix_from_last_columns(mat *M, int k, mat *M_k){
int i,ind;
vec *col_vec;
ind = 0;
for(i=k; i<M->ncols; i++){
col_vec = vector_new(M->nrows);
matrix_get_col(M,i,col_vec);
matrix_set_col(M_k,ind,col_vec);
vector_delete(col_vec);
ind++;
}
}
/* Mout = M((k+1):end,(k+1):end) in matlab notation */
void fill_matrix_from_lower_right_corner(mat *M, int k, mat *M_out){
int i,j,i_out,j_out;
for(i=k; i<M->nrows; i++){
for(j=k; j<M->ncols; j++){
i_out = i-k; //shift indices so the corner starts at (0,0)
j_out = j-k;
//printf("setting element %d, %d of M_out\n", i_out, j_out);
matrix_set_element(M_out,i_out,j_out,matrix_get_element(M,i,j));
}
}
}
/* append matrices side by side: C = [A, B] */
/* flat copies are valid only because storage is column-major:
   the first A->ncols columns of C are exactly A's buffer, then B's */
void append_matrices_horizontally(mat *A, mat *B, mat *C){
int i,j;
#pragma omp parallel shared(C,A) private(i)
{
#pragma omp for
for(i=0; i<((A->nrows)*(A->ncols)); i++){
C->d[i] = A->d[i];
}
}
#pragma omp parallel shared(C,B,A) private(i)
{
#pragma omp for
for(i=0; i<((B->nrows)*(B->ncols)); i++){
C->d[i + (A->nrows)*(A->ncols)] = B->d[i];
}
}
/*
for(i=0; i<A->nrows; i++){
for(j=0; j<A->ncols; j++){
matrix_set_element(C,i,j,matrix_get_element(A,i,j));
}
}
for(i=0; i<B->nrows; i++){
for(j=0; j<B->ncols; j++){
matrix_set_element(C,i,A->ncols + j,matrix_get_element(B,i,j));
}
}*/
}
/* append matrices vertically: C = [A; B] (element-wise; C pre-allocated
   with A->nrows + B->nrows rows) */
void append_matrices_vertically(mat *A, mat *B, mat *C){
int i,j;
for(i=0; i<A->nrows; i++){
for(j=0; j<A->ncols; j++){
matrix_set_element(C,i,j,matrix_get_element(A,i,j));
}
}
for(i=0; i<B->nrows; i++){
for(j=0; j<B->ncols; j++){
matrix_set_element(C,A->nrows+i,j,matrix_get_element(B,i,j));
}
}
}
/* compute eigendecomposition of symmetric matrix M
*/
/* on return S is overwritten with the eigenvectors ('V') of its upper triangle ('U'),
   and evals receives the eigenvalues in ascending order (LAPACK dsyev convention) */
void compute_evals_and_evecs_of_symm_matrix(mat *S, vec *evals){
//LAPACKE_dsyev( LAPACK_ROW_MAJOR, 'V', 'U', S->nrows, S->d, S->nrows, evals->d);
LAPACKE_dsyev( LAPACK_COL_MAJOR, 'V', 'U', S->nrows, S->d, S->ncols, evals->d);
}
/* Performs [Q,R] = qr(M,'0') compact QR factorization
M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */
void compact_QR_factorization(mat *M, mat *Q, mat *R){
int i,j,m,n,k;
m = M->nrows; n = M->ncols;
k = min(m,n);
// printf("doing QR with m = %d, n = %d, k = %d\n", m,n,k);
mat *R_full = matrix_new(m,n); //work copy; dgeqrf factors in place
matrix_copy(R_full,M);
//vec *tau = vector_new(n);
vec *tau = vector_new(k); //Householder scalars, one per reflector
// get R
//printf("get R..\n");
//LAPACKE_dgeqrf(CblasColMajor, m, n, R_full->d, n, tau->d);
LAPACKE_dgeqrf(LAPACK_COL_MAJOR, R_full->nrows, R_full->ncols, R_full->d, R_full->nrows, tau->d);
// extract the upper triangular k x k block into R
for(i=0; i<k; i++){
for(j=0; j<k; j++){
if(j>=i){
matrix_set_element(R,i,j,matrix_get_element(R_full,i,j));
}
}
}
// get Q: dorgqr expands the Householder reflectors stored below the diagonal
matrix_copy(Q,R_full);
//printf("dorgqr..\n");
LAPACKE_dorgqr(LAPACK_COL_MAJOR, Q->nrows, Q->ncols, min(Q->ncols,Q->nrows), Q->d, Q->nrows, tau->d);
// clean up
matrix_delete(R_full);
vector_delete(tau);
}
/* returns Q from [Q,R] = qr(M,'0') compact QR factorization
M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */
void QR_factorization_getQ(mat *M, mat *Q){
int i,j,m,n,k;
m = M->nrows; n = M->ncols;
k = min(m,n);
matrix_copy(Q,M); //factor a copy so M is left untouched
vec *tau = vector_new(k);
LAPACKE_dgeqrf(LAPACK_COL_MAJOR, m, n, Q->d, m, tau->d);
LAPACKE_dorgqr(LAPACK_COL_MAJOR, m, n, n, Q->d, m, tau->d); //expand reflectors into explicit Q
// clean up
vector_delete(tau);
}
/* overwrite Q (mxn) with the Q factor of its own compact QR factorization;
   uses the pivoted dgeqpf as a workaround for an observed dgeqrf crash (see note) */
void QR_factorization_getQ_inplace(mat *Q) {
// printf("k1\n");
int i,j,m,n,k;
m = Q->nrows; n = Q->ncols;
k = min(m,n);
int *jpvt = (int*)malloc(n*sizeof(int)); //column pivot indices required by dgeqpf
vec *tau = vector_new(k);
// check memory allocation
// printf("k1b\n");
// for (i=0; i++; i<m) {
// for (j=0; j++; j<n) {
// matrix_set_element(Q, i, j, matrix_get_element(Q, i, j));
// }
// }
/*
BUG DETECTED! the dgeqrf call raises segmentation fault occasionally.
the arguments passed to it seems to be fine. probably it's due to bug
internal to MKL.
To reproduce the bug: call qr_bug_reproduce() in main.c
*/
// printf("k2 m=%d,n=%d,size=%d,tau=%d\n", m, n, sizeof(Q->d), k);
// LAPACKE_dgeqrf(LAPACK_COL_MAJOR, m, n, Q->d, m, tau->d);
// NOTE(review): dgeqpf does QR *with column pivoting*; the resulting Q spans
// the same column space but may differ from the unpivoted Q — confirm callers
// only rely on Q as an orthonormal basis.
LAPACKE_dgeqpf(LAPACK_COL_MAJOR, m, n, Q->d, m, jpvt, tau->d);
// printf("k2b\n");
LAPACKE_dorgqr(LAPACK_COL_MAJOR, m, n, n, Q->d, m, tau->d);
// printf("k3\n");
// clean up
vector_delete(tau);
free(jpvt);
// printf("k4\n");
}
/* computes SVD: M = U*S*Vt; note Vt = V^T */
/* Thin SVD: M = U*S*Vt with k = min(m,n) singular values.
 * U must be m x k, S must be k x k (filled as a diagonal matrix),
 * Vt must be k x n.  M is overwritten by dgesvd.
 * NOTE(review): the final argument of LAPACKE_dgesvd is the `superb`
 * scratch array (needs >= min(m,n)-1 doubles); `work` is sized like a
 * classic LWORK workspace, which is larger than required — harmless,
 * but confirm the intent. */
void singular_value_decomposition(mat *M, mat *U, mat *S, mat *Vt){
    int m,n,k;
    m = M->nrows; n = M->ncols;
    k = min(m,n);
    vec * work = vector_new(2*max(3*min(m, n)+max(m, n), 5*min(m,n)));
    vec * svals = vector_new(k);
    LAPACKE_dgesvd( LAPACK_COL_MAJOR, 'S', 'S', m, n, M->d, m, svals->d, U->d, m, Vt->d, k, work->d );
    initialize_diagonal_matrix(S, svals);  // spread svals along S's diagonal
    vector_delete(work);
    vector_delete(svals);
}
/* Reconstructs P = U*S*V^T from SVD factors.
 * P must be preallocated with the product's dimensions.
 * Fixes: the intermediate SVt was leaked (never freed); removed unused
 * locals m, alpha, beta (the multiply wrappers take no scaling factors). */
void form_svd_product_matrix(mat *U, mat *S, mat *V, mat *P){
    int k, n;
    n = P->ncols;
    k = S->nrows;
    mat * SVt = matrix_new(k,n);
    // form SVt = S*V^T
    matrix_matrix_transpose_mult(S,V,SVt);
    // form P = U*S*V^T
    matrix_matrix_mult(U,SVt,P);
    matrix_delete(SVt);  // fix: was leaked
}
/* Estimates the numerical rank of M via Gram-Schmidt on random samples and
 * builds an orthonormal basis *Q (m x *good_rank) for its range.
 * Outputs: *Q is allocated here (caller frees); *good_rank holds the
 * rank estimate (at most round(min(m,n)*frac_of_max_rank)).
 * Fixes: work vectors vi, vj, p, p1 were leaked; removed unused local ind.
 * NOTE(review): p1's norm is tested before p1 is first written (inner loop,
 * first pass) — this relies on vector_new zero-initializing; confirm. */
void estimate_rank_and_buildQ(mat *M, double frac_of_max_rank, double TOL, mat **Q, int *good_rank){
    int m,n,i,j,maxdim;
    double vec_norm;
    mat *RN,*Y,*Qbig,*Qsmall;
    vec *vi,*vj,*p,*p1;
    m = M->nrows;
    n = M->ncols;
    maxdim = round(min(m,n)*frac_of_max_rank);

    vi = vector_new(m);
    vj = vector_new(m);
    p = vector_new(m);
    p1 = vector_new(m);

    // build random matrix
    printf("form RN..\n");
    RN = matrix_new(n, maxdim);
    initialize_random_matrix(RN);

    // multiply to get matrix of random samples Y
    printf("form Y: %d x %d..\n",m,maxdim);
    Y = matrix_new(m, maxdim);
    matrix_matrix_mult(M, RN, Y);

    // estimate rank k and build Q from Y via modified Gram-Schmidt;
    // stop once a new column's residual (and the previous one's) drops below TOL
    printf("form Qbig..\n");
    Qbig = matrix_new(m, maxdim);
    matrix_copy(Qbig, Y);
    printf("estimate rank with TOL = %f..\n", TOL);
    *good_rank = maxdim;
    int forbreak = 0;
    for(j=0; !forbreak && j<maxdim; j++){
        matrix_get_col(Qbig, j, vj);
        for(i=0; i<j; i++){
            matrix_get_col(Qbig, i, vi);
            project_vector(vj, vi, p);
            vector_sub(vj, p);
            if(vector_get2norm(p) < TOL && vector_get2norm(p1) < TOL){
                *good_rank = j;
                forbreak = 1;
                break;
            }
            vector_copy(p1,p);
        }
        vec_norm = vector_get2norm(vj);
        vector_scale(vj, 1.0/vec_norm);
        matrix_set_col(Qbig, j, vj);
    }
    printf("estimated rank = %d\n", *good_rank);
    Qsmall = matrix_new(m, *good_rank);
    *Q = matrix_new(m, *good_rank);
    matrix_copy_first_columns(Qsmall, Qbig);
    QR_factorization_getQ(Qsmall, *Q);  // re-orthogonalize for numerical safety

    // clean up (fix: vi/vj/p/p1 were previously leaked)
    matrix_delete(RN);
    matrix_delete(Y);
    matrix_delete(Qsmall);
    matrix_delete(Qbig);
    vector_delete(vi);
    vector_delete(vj);
    vector_delete(p);
    vector_delete(p1);
}
/* Adaptive range finder: grows a sample matrix *Y in blocks of kblock
 * columns until ||Q*Q^T*M - M|| (as a percent-error fraction) <= TOL.
 * Outputs: *Y (samples), *Q (orthonormal basis), *good_rank = ncols of *Y;
 * both matrices are allocated here and owned by the caller.
 * Fixes: QtM and QQtM were leaked on the final (converged) iteration —
 * they were only freed on the "add more samples" path; removed the unused
 * locals i, j and the unused vectors vi, vj, p, p1. */
void estimate_rank_and_buildQ2(mat *M, int kblock, double TOL, mat **Y, mat **Q, int *good_rank){
    int m,n,ind,exit_loop = 0;
    double error_norm;
    mat *RN,*Y_new,*Y_big,*QtM,*QQtM;
    m = M->nrows;
    n = M->ncols;

    // build random matrix
    printf("form RN..\n");
    RN = matrix_new(n,kblock);
    initialize_random_matrix(RN);

    // multiply to get matrix of random samples Y
    printf("form Y: %d x %d..\n",m,kblock);
    *Y = matrix_new(m, kblock);
    matrix_matrix_mult(M, RN, *Y);

    ind = 0;
    while(!exit_loop){
        printf("form Q..\n");
        if(ind > 0){
            matrix_delete(*Q);  // drop the basis from the previous round
        }
        *Q = matrix_new((*Y)->nrows, (*Y)->ncols);
        QR_factorization_getQ(*Y, *Q);

        // compute QtM
        QtM = matrix_new((*Q)->ncols, M->ncols);
        matrix_transpose_matrix_mult(*Q,M,QtM);

        // compute QQtM (projection of M onto range(Q))
        QQtM = matrix_new(M->nrows, M->ncols);
        matrix_matrix_mult(*Q,QtM,QQtM);

        error_norm = 0.01*get_percent_error_between_two_mats(QQtM, M);
        printf("Y is of size %d x %d and error_norm = %f\n", (*Y)->nrows, (*Y)->ncols, error_norm);
        *good_rank = (*Y)->ncols;

        // add more samples if needed
        if(error_norm > TOL){
            Y_new = matrix_new(m, kblock);
            initialize_random_matrix(RN);
            matrix_matrix_mult(M, RN, Y_new);
            Y_big = matrix_new((*Y)->nrows, (*Y)->ncols + Y_new->ncols);
            append_matrices_horizontally(*Y, Y_new, Y_big);
            matrix_delete(*Y);
            *Y = matrix_new(Y_big->nrows,Y_big->ncols);
            matrix_copy(*Y,Y_big);
            matrix_delete(Y_big);
            matrix_delete(Y_new);
            matrix_delete(QtM);
            matrix_delete(QQtM);
            ind++;
        }
        else{
            // converged: clean everything up (fix: QtM/QQtM were leaked here)
            matrix_delete(QtM);
            matrix_delete(QQtM);
            matrix_delete(RN);
            exit_loop = 1;
        }
    }
}
/* Returns the elapsed time between two gettimeofday() samples as a
 * fractional number of seconds (microsecond resolution). */
double get_seconds_frac(struct timeval start_timeval, struct timeval end_timeval){
    const long elapsed_us =
        (end_timeval.tv_sec - start_timeval.tv_sec) * 1000000L
        + (end_timeval.tv_usec - start_timeval.tv_usec);
    return elapsed_us / 1e6;
}
/*********************Lijian***********************/
/* row-major cblas_dgemm wrappers: C = A*B and C = A^T*B */
/* C = A*B for row-major matrices (thin cblas_dgemm wrapper).
 * C must be preallocated A->nrows x B->ncols; leading dimensions are the
 * column counts, as required for row-major storage. */
void matrix_matrix_mult_row(mat *A, mat* B, mat* C){
    double alpha, beta;
    alpha = 1.0; beta = 0.0;  // plain product: C = 1.0*A*B + 0.0*C
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
}
/* C = A^T*B for row-major matrices (thin cblas_dgemm wrapper).
 * C must be preallocated A->ncols x B->ncols; A is transposed logically
 * by dgemm, not copied. */
void matrix_transpose_matrix_mult_row(mat *A, mat* B, mat* C){
    double alpha, beta;
    alpha = 1.0; beta = 0.0;  // plain product: C = 1.0*A^T*B + 0.0*C
    cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
}
/*********************Lijian***********************/
|
z_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the z-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the z-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
void z_solve()
{
  int i, j, k, k1, k2, m;
  double ru1, fac1, fac2;

  //kai
  // int k15;
  // consistent_data(&k15, "int", 1);

  //---------------------------------------------------------------------
  // Prepare for z-solve, array redistribution
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_zsolve);

  // NOTE(review): `k15` is not declared in this function (a local
  // declaration is commented out above), so it must come from file/global
  // scope.  It is read in the j-loop bound below and reset to 0 at the end
  // of every iteration of the parallel loop without being listed private —
  // confirm this is the intended "consistent data" checkpointing behavior.
  // NOTE(review): lhs/lhsp/lhsm/cv/rhos are indexed without j, so they
  // appear to be shared scratch; unless header.h makes them threadprivate,
  // concurrent j-iterations would race — confirm against header.h.
  #pragma omp parallel for default(shared) private(i,j,k,k1,k2,m, \
                                                   ru1,fac1,fac2)
  for (j = k15+1; j <= ny2; j++) {
    lhsinitj(nz2+1, nx2);

    //---------------------------------------------------------------------
    // Computes the left hand side for the three z-factors
    //---------------------------------------------------------------------

    //---------------------------------------------------------------------
    // first fill the lhs for the u-eigenvalue
    //---------------------------------------------------------------------
    for (i = 1; i <= nx2; i++) {
      for (k = 0; k <= nz2+1; k++) {
        ru1 = c3c4*rho_i[k][j][i];
        cv[k] = ws[k][j][i];
        rhos[k] = max(max(dz4+con43*ru1, dz5+c1c5*ru1), max(dzmax+ru1, dz1));
      }

      // tridiagonal (pentadiagonal after dissipation) coefficients
      for (k = 1; k <= nz2; k++) {
        lhs[k][i][0] =  0.0;
        lhs[k][i][1] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];
        lhs[k][i][2] =  1.0 + c2dttz1 * rhos[k];
        lhs[k][i][3] =  dttz2 * cv[k+1] - dttz1 * rhos[k+1];
        lhs[k][i][4] =  0.0;
      }
    }

    //---------------------------------------------------------------------
    // add fourth order dissipation
    //---------------------------------------------------------------------
    for (i = 1; i <= nx2; i++) {
      k = 1;
      lhs[k][i][2] = lhs[k][i][2] + comz5;
      lhs[k][i][3] = lhs[k][i][3] - comz4;
      lhs[k][i][4] = lhs[k][i][4] + comz1;

      k = 2;
      lhs[k][i][1] = lhs[k][i][1] - comz4;
      lhs[k][i][2] = lhs[k][i][2] + comz6;
      lhs[k][i][3] = lhs[k][i][3] - comz4;
      lhs[k][i][4] = lhs[k][i][4] + comz1;
    }

    // interior points get the full five-point dissipation stencil
    for (k = 3; k <= nz2-2; k++) {
      for (i = 1; i <= nx2; i++) {
        lhs[k][i][0] = lhs[k][i][0] + comz1;
        lhs[k][i][1] = lhs[k][i][1] - comz4;
        lhs[k][i][2] = lhs[k][i][2] + comz6;
        lhs[k][i][3] = lhs[k][i][3] - comz4;
        lhs[k][i][4] = lhs[k][i][4] + comz1;
      }
    }

    for (i = 1; i <= nx2; i++) {
      k = nz2-1;
      lhs[k][i][0] = lhs[k][i][0] + comz1;
      lhs[k][i][1] = lhs[k][i][1] - comz4;
      lhs[k][i][2] = lhs[k][i][2] + comz6;
      lhs[k][i][3] = lhs[k][i][3] - comz4;

      k = nz2;
      lhs[k][i][0] = lhs[k][i][0] + comz1;
      lhs[k][i][1] = lhs[k][i][1] - comz4;
      lhs[k][i][2] = lhs[k][i][2] + comz5;
    }

    //---------------------------------------------------------------------
    // subsequently, fill the other factors (u+c), (u-c)
    //---------------------------------------------------------------------
    for (k = 1; k <= nz2; k++) {
      for (i = 1; i <= nx2; i++) {
        lhsp[k][i][0] = lhs[k][i][0];
        lhsp[k][i][1] = lhs[k][i][1] - dttz2 * speed[k-1][j][i];
        lhsp[k][i][2] = lhs[k][i][2];
        lhsp[k][i][3] = lhs[k][i][3] + dttz2 * speed[k+1][j][i];
        lhsp[k][i][4] = lhs[k][i][4];
        lhsm[k][i][0] = lhs[k][i][0];
        lhsm[k][i][1] = lhs[k][i][1] + dttz2 * speed[k-1][j][i];
        lhsm[k][i][2] = lhs[k][i][2];
        lhsm[k][i][3] = lhs[k][i][3] - dttz2 * speed[k+1][j][i];
        lhsm[k][i][4] = lhs[k][i][4];
      }
    }

    //---------------------------------------------------------------------
    // FORWARD ELIMINATION
    //---------------------------------------------------------------------
    // Thomas-algorithm sweep for the first three (u-eigenvalue) components
    for (k = 0; k <= grid_points[2]-3; k++) {
      k1 = k + 1;
      k2 = k + 2;
      for (i = 1; i <= nx2; i++) {
        fac1 = 1.0/lhs[k][i][2];
        lhs[k][i][3] = fac1*lhs[k][i][3];
        lhs[k][i][4] = fac1*lhs[k][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
        }
        lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];
        lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];
        }
        lhs[k2][i][1] = lhs[k2][i][1] - lhs[k2][i][0]*lhs[k][i][3];
        lhs[k2][i][2] = lhs[k2][i][2] - lhs[k2][i][0]*lhs[k][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhs[k2][i][0]*rhs[k][j][i][m];
        }
      }
    }

    //---------------------------------------------------------------------
    // The last two rows in this grid block are a bit different,
    // since they for (not have two more rows available for the
    // elimination of off-diagonal entries
    //---------------------------------------------------------------------
    k  = grid_points[2]-2;
    k1 = grid_points[2]-1;
    for (i = 1; i <= nx2; i++) {
      fac1 = 1.0/lhs[k][i][2];
      lhs[k][i][3] = fac1*lhs[k][i][3];
      lhs[k][i][4] = fac1*lhs[k][i][4];
      for (m = 0; m < 3; m++) {
        rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
      }
      lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];
      lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];
      for (m = 0; m < 3; m++) {
        rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];
      }

      //---------------------------------------------------------------------
      // scale the last row immediately
      //---------------------------------------------------------------------
      fac2 = 1.0/lhs[k1][i][2];
      for (m = 0; m < 3; m++) {
        rhs[k1][j][i][m] = fac2*rhs[k1][j][i][m];
      }
    }

    //---------------------------------------------------------------------
    // for (the u+c and the u-c factors
    //---------------------------------------------------------------------
    for (k = 0; k <= grid_points[2]-3; k++) {
      k1 = k + 1;
      k2 = k + 2;
      for (i = 1; i <= nx2; i++) {
        m = 3;  // u+c component uses lhsp
        fac1 = 1.0/lhsp[k][i][2];
        lhsp[k][i][3]    = fac1*lhsp[k][i][3];
        lhsp[k][i][4]    = fac1*lhsp[k][i][4];
        rhs[k][j][i][m]  = fac1*rhs[k][j][i][m];
        lhsp[k1][i][2]   = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];
        lhsp[k1][i][3]   = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];
        rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];
        lhsp[k2][i][1]   = lhsp[k2][i][1] - lhsp[k2][i][0]*lhsp[k][i][3];
        lhsp[k2][i][2]   = lhsp[k2][i][2] - lhsp[k2][i][0]*lhsp[k][i][4];
        rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsp[k2][i][0]*rhs[k][j][i][m];

        m = 4;  // u-c component uses lhsm
        fac1 = 1.0/lhsm[k][i][2];
        lhsm[k][i][3]    = fac1*lhsm[k][i][3];
        lhsm[k][i][4]    = fac1*lhsm[k][i][4];
        rhs[k][j][i][m]  = fac1*rhs[k][j][i][m];
        lhsm[k1][i][2]   = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];
        lhsm[k1][i][3]   = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];
        rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];
        lhsm[k2][i][1]   = lhsm[k2][i][1] - lhsm[k2][i][0]*lhsm[k][i][3];
        lhsm[k2][i][2]   = lhsm[k2][i][2] - lhsm[k2][i][0]*lhsm[k][i][4];
        rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsm[k2][i][0]*rhs[k][j][i][m];
      }
    }

    //---------------------------------------------------------------------
    // And again the last two rows separately
    //---------------------------------------------------------------------
    k  = grid_points[2]-2;
    k1 = grid_points[2]-1;
    for (i = 1; i <= nx2; i++) {
      m = 3;
      fac1 = 1.0/lhsp[k][i][2];
      lhsp[k][i][3]    = fac1*lhsp[k][i][3];
      lhsp[k][i][4]    = fac1*lhsp[k][i][4];
      rhs[k][j][i][m]  = fac1*rhs[k][j][i][m];
      lhsp[k1][i][2]   = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];
      lhsp[k1][i][3]   = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];
      rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];

      m = 4;
      fac1 = 1.0/lhsm[k][i][2];
      lhsm[k][i][3]    = fac1*lhsm[k][i][3];
      lhsm[k][i][4]    = fac1*lhsm[k][i][4];
      rhs[k][j][i][m]  = fac1*rhs[k][j][i][m];
      lhsm[k1][i][2]   = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];
      lhsm[k1][i][3]   = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];
      rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];

      //---------------------------------------------------------------------
      // Scale the last row immediately (some of this is overkill
      // if this is the last cell)
      //---------------------------------------------------------------------
      rhs[k1][j][i][3] = rhs[k1][j][i][3]/lhsp[k1][i][2];
      rhs[k1][j][i][4] = rhs[k1][j][i][4]/lhsm[k1][i][2];
    }

    //---------------------------------------------------------------------
    // BACKSUBSTITUTION
    //---------------------------------------------------------------------
    k  = grid_points[2]-2;
    k1 = grid_points[2]-1;
    for (i = 1; i <= nx2; i++) {
      for (m = 0; m < 3; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3]*rhs[k1][j][i][m];
      }
      rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3]*rhs[k1][j][i][3];
      rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3]*rhs[k1][j][i][4];
    }

    //---------------------------------------------------------------------
    // Whether or not this is the last processor, we always have
    // to complete the back-substitution
    //---------------------------------------------------------------------

    //---------------------------------------------------------------------
    // The first three factors
    //---------------------------------------------------------------------
    for (k = grid_points[2]-3; k >= 0; k--) {
      k1 = k + 1;
      k2 = k + 2;
      for (i = 1; i <= nx2; i++) {
        for (m = 0; m < 3; m++) {
          rhs[k][j][i][m] = rhs[k][j][i][m] -
                            lhs[k][i][3]*rhs[k1][j][i][m] -
                            lhs[k][i][4]*rhs[k2][j][i][m];
        }

        //-------------------------------------------------------------------
        // And the remaining two
        //-------------------------------------------------------------------
        rhs[k][j][i][3] = rhs[k][j][i][3] -
                          lhsp[k][i][3]*rhs[k1][j][i][3] -
                          lhsp[k][i][4]*rhs[k2][j][i][3];
        rhs[k][j][i][4] = rhs[k][j][i][4] -
                          lhsm[k][i][3]*rhs[k1][j][i][4] -
                          lhsm[k][i][4]*rhs[k2][j][i][4];
      }
    }

    //kai
    // NOTE(review): shared k15 written inside the parallel loop body —
    // see the note above the pragma; confirm intended semantics.
    k15 = 0;
  }
  if (timeron) timer_stop(t_zsolve);

  tzetar();
}
|
dense_minmax.c | /* Copyright (c) 2016-2017 Drew Schmidt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <string.h>
#include "coop.h"
#include "utils/fill.h"
#include "utils/inverse.h"
#include "utils/safeomp.h"
#include "utils/special_vals.h"
#define TYPE_MIN 1
#define TYPE_MAX 2
#define TYPE_ABSMIN 3
#define TYPE_ABSMAX 4
/* Running top-K accumulator for pairwise comparisons: co[0..K-1] holds the
 * K retained comparison values (kept sorted ascending by assort), with the
 * parallel arrays I/J/L carrying each value's pair indices and run length. */
typedef struct
{
  int K;                 // Number of comparisons to retain
  double *restrict co;   // retained comparison values (co-variance, sine, ...)
  int *restrict I;       // i index of each retained pair
  int *restrict J;       // j index of each retained pair
  int *restrict L;       // length of run (number of comparisons)
} maxco_t;
// almost-sorted insert
// A has a new value in the first slot and is sorted increasing otherwise;
// re-sort A low to high, bringing the parallel arrays I, J and L along for the ride
/* Inserts A[0] into the already-sorted (ascending) tail A[1..K-1], keeping
 * the parallel arrays I, J, L aligned with A.
 * Fixes an off-by-one in the original shift loop: it copied from A[ind+1]
 * (reading one element past the run — A[K] out of bounds on a full shift)
 * and then wrote the saved element one slot too high, corrupting the order. */
static inline void assort(const int K, double *restrict A, int *restrict I, int *restrict J, int *restrict L)
{
  int ind;
  const double atmp = A[0];
  const int itmp = I[0];
  const int jtmp = J[0];
  const int ltmp = L[0];

  /* shift every element smaller than atmp down one slot */
  for (ind = 1; ind < K && A[ind] < atmp; ind++)
  {
    A[ind - 1] = A[ind];
    I[ind - 1] = I[ind];
    J[ind - 1] = J[ind];
    L[ind - 1] = L[ind];
  }

  /* drop the saved head element into its sorted position */
  A[ind - 1] = atmp;
  I[ind - 1] = itmp;
  J[ind - 1] = jtmp;
  L[ind - 1] = ltmp;
}
// A sorted least to greatest
/* Folds a new comparison value `cmp` into the top-K accumulator `mx`
 * (mx->co is kept sorted least to greatest, so co[0] is the current worst
 * retained value).  Only TYPE_MAX is implemented so far.
 * Fix: the assort call was missing its fifth argument (mx->L), which does
 * not match assort's five-parameter signature and could not compile.
 * NOTE(review): the K parameter is unused (assort uses mx->K); kept for
 * interface compatibility. */
static inline void rename_me(const int type, const double cmp, const int K, maxco_t *mx)
{
  (void) K;  /* length comes from mx->K */
  if (type == TYPE_MIN)
  {
    // TODO
  }
  else if (type == TYPE_ABSMIN)
  {
    // TODO
  }
  else if (type == TYPE_MAX)
  {
    /* beats the smallest retained value: replace it and restore order */
    if (cmp > mx->co[0])
      assort(mx->K, mx->co, mx->I, mx->J, mx->L);
  }
  else if (type == TYPE_ABSMAX)
  {
    // TODO
  }
}
/* Pairwise-complete column sums: accumulates vec[k] into *sumx and
 * x[k+mi] into *sumy over every row k where BOTH values are non-NaN,
 * and stores the count of such rows in *len. */
static inline void compute_sums(const int m, const int mi, const double * const restrict vec, const double * const restrict x, double *restrict sumx, double *restrict sumy, int *restrict len)
{
  double acc_x = 0.0;
  double acc_y = 0.0;
  int count = 0;

  PLEASE_VECTORIZE
  for (int k=0; k<m; k++)
  {
    const double a = vec[k];
    const double b = x[k + mi];
    if (!isnan(a) && !isnan(b))
    {
      acc_x += a;
      acc_y += b;
      count++;
    }
  }

  *sumx = acc_x;
  *sumy = acc_y;
  *len = count;
}
// cor - vals, I/J - their indices, L - length of the run
/* Pairwise-complete "max Pearson correlation" scan over the m x n,
 * column-major matrix x: for each column pair (i, j) with i >= j it computes
 * the mean-centered cross product over rows where both entries are non-NaN
 * and feeds it to the top-K accumulator mx.
 * Returns COOP_OK, or an error code from CHECKMALLOC/CHECKRET.
 * NOTE(review): this function appears unfinished and cannot compile as-is:
 *  - mx->cor does not exist (the maxco_t member is `co`), and indexing it
 *    with i + n*j assumes an n x n buffer while co has only K slots;
 *  - rename_me is called with 3 arguments but takes 4 (type, cmp, K, mx);
 *  - `cor` in the inv_sym_chol call is not declared in this scope;
 *  - sdx/sdy are computed but never used (the commented-out line suggests
 *    the cross product was meant to be normalized into a correlation).
 * Confirm intent before relying on any of the review notes below. */
int coop_maxpcor_mat_inplace_pairwise(const bool inv, const int m, const int n, const double * const restrict x, maxco_t *mx)
{
  int check;
  int ind = 0;  // running index into mx->I / mx->J / mx->L for NA pairs
  double *vec = malloc(m * sizeof(*vec));
  CHECKMALLOC(vec);

  for (int j=0; j<n; j++)
  {
    const int mj = m*j;
    memcpy(vec, x+mj, m*sizeof(*vec));  // snapshot column j

    // #pragma omp parallel for default(none) shared(j, vec) if(m*n > OMP_MIN_SIZE)
    for (int i=j; i<n; i++)
    {
      const int mi = m*i;
      int len;
      double meanx, meany;
      // pairwise-complete sums over rows where both columns are non-NaN
      compute_sums(m, mi, vec, x, &meanx, &meany, &len);

      // fewer than 2 complete observations: record NA and move on
      if (len == 0 || len == 1)
      {
        set_na_real(mx->cor + (i + n*j));  // NOTE(review): no `cor` member — see header note
        set_na_real(mx->cor + (j + n*i));
        mx->I[ind] = i;
        mx->J[ind] = j;
        mx->L[ind] = len;
        ind++;

        continue;
      }

      const double dlen = (double) len;
      meanx /= dlen;
      meany /= dlen;

      // pairwise-complete standard deviations (currently unused — see header note)
      double sdx = 0.;
      double sdy = 0.;

      SAFE_SIMD
      for (int k=0; k<m; k++)
      {
        if (!isnan(vec[k]) && !isnan(x[k + mi]))
        {
          sdx += (vec[k] - meanx)*(vec[k] - meanx);
          sdy += (x[k + mi] - meany)*(x[k + mi] - meany);
        }
      }

      sdx = sqrt(sdx/(dlen-1.));
      sdy = sqrt(sdy/(dlen-1.));

      // mean-centered cross product
      double mmcp = 0.0;

      SAFE_SIMD
      for (int k=0; k<m; k++)
      {
        if (!isnan(vec[k]) && !isnan(x[k + mi]))
          mmcp += (vec[k] - meanx) * (x[k + mi] - meany);
      }

      rename_me(TYPE_MAX, mmcp*dlen, mx);  // NOTE(review): rename_me takes 4 args
      // cor[i + n*j] = mmcp / sdx / sdy / (dlen - 1.0);;
    }
  }

  free(vec);

  if (inv)
  {
    check = inv_sym_chol(n, cor);  // NOTE(review): `cor` is not declared here
    CHECKRET(check);
  }

  return COOP_OK;
}
|
Example_nesting_restrict.3.c | /*
* @@name: nesting_restrict.3c
* @@type: C
* @@compilable: no
* @@linkable: no
* @@expect: failure
*/
void work(int i, int j) {}
/* Deliberately INVALID OpenMP (the file header says @@expect: failure):
 * a `single` region may not be closely nested inside a worksharing (`for`)
 * region.  Do not "fix" this — it exists to demonstrate the restriction. */
void wrong3(int n)
{
  #pragma omp parallel default(shared)
  {
    int i;
    #pragma omp for
    for (i=0; i<n; i++) {
      /* incorrect nesting of regions */
      #pragma omp single
      work(i, 0);
    }
  }
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * Y is normalized (mutated) during the computation, exactly as before.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from y's seconds field when x's microseconds are smaller. */
  if (x->tv_usec < y->tv_usec)
  {
    const int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }

  /* Carry the other way when the microsecond gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    const int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* Compute the remaining difference; tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
clim_img_bitmap.h | #ifndef CLIM_IMG_BITMAP_H
#define CLIM_IMG_BITMAP_H
#include "clim_platform_detector.h"
#ifdef CLIM_COMPILER_MSVC
#pragma once
#endif
#include <stdlib.h>
#include <math.h>
#include "clim_base.h"
#include "clim_utils.h"
#if defined(CLIM_COMPILER_GCC) || defined(CLIM_COMPILER_CLANG)
/* On-disk BMP file header (byte-packed so it maps the file layout 1:1). */
typedef struct
{
  uint16_t signature;          // "BM" = 0x4D42 on a little-endian read
  uint32_t filesize;           // total file size in bytes
  uint32_t reserved;           // reserved, unused
  uint32_t pixel_data_offset;  // byte offset from file start to pixel data
} __attribute__((packed)) clim_bitmap_file_header_t;

/* On-disk BMP info header (BITMAPINFOHEADER layout, byte-packed). */
typedef struct
{
  uint32_t size;                // header size in bytes
  int32_t width;                // pixel width (loader takes the absolute value)
  int32_t height;               // pixel height (loader takes the absolute value)
  uint16_t planes;              // written as 1 by clim_img_bmp_write
  uint16_t bits_per_pixel;      // 24 or 32 supported by this loader
  uint32_t compression;
  uint32_t img_size;            // pixel data size in bytes
  int32_t x_pixels_per_meter;
  int32_t y_pixels_per_meter;
  uint32_t colors_used;
  uint32_t colors_important;
} __attribute__((packed)) clim_bitmap_info_header_t;
#elif CLIM_COMPILER_MSVC
/* NOTE(review): `#elif CLIM_COMPILER_MSVC` only works if the macro is
 * defined to a nonzero value; if the detector defines it empty, this line
 * is a preprocessor error — consider `#elif defined(CLIM_COMPILER_MSVC)`. */
#pragma pack(push, 1)
typedef struct
{
  uint16_t signature;          // "BM" = 0x4D42 on a little-endian read
  uint32_t filesize;           // total file size in bytes
  uint32_t reserved;           // reserved, unused
  uint32_t pixel_data_offset;  // byte offset from file start to pixel data
} clim_bitmap_file_header_t;
#pragma pack(pop)

#pragma pack(push, 1)
typedef struct
{
  uint32_t size;                // header size in bytes
  int32_t width;                // pixel width (loader takes the absolute value)
  int32_t height;               // pixel height (loader takes the absolute value)
  uint16_t planes;              // written as 1 by clim_img_bmp_write
  uint16_t bits_per_pixel;      // 24 or 32 supported by this loader
  uint32_t compression;
  uint32_t img_size;            // pixel data size in bytes
  int32_t x_pixels_per_meter;
  int32_t y_pixels_per_meter;
  uint32_t colors_used;
  uint32_t colors_important;
} clim_bitmap_info_header_t;
#pragma pack(pop)
#endif
#define CLIM_BMP_ABS(x) (((x) < 0) ? (-x) : (x))
/* Parses an in-memory BMP (24- or 32-bit uncompressed) into pctx.
 * Pixel data is converted from the file's BGR(A) order into pctx's
 * per-pixel layout (stride sizeof(clim_pixelcolor_t)); missing alpha is
 * filled with 0xff.  pctx->data.pixels is allocated here (caller frees).
 * Fixes:
 *  - the "BM" signature was read via a uint16_t pointer cast (type-punning
 *    UB, flagged by the old TODO, plus potential misalignment) — now memcpy;
 *  - pixel_data_offset is a 32-bit field but only its low byte was read
 *    (uint8_t pixel_offset = *buffer), which breaks for any offset >= 256;
 *  - buffer_len was ignored entirely — a minimal header-size check is added.
 * NOTE(review): the 0x4D42 comparison still assumes a little-endian host,
 * as the original did; there is still no bounds check on the pixel data
 * itself against buffer_len — TODO. */
clim_errcode_t clim_img_bmp_load(const uint8_t* buffer,
                                 const size_t buffer_len,
                                 clim_img_ctx_t* pctx)
{
    if (!buffer)
        return CLIM_EC_INVALID_PARAMETERS;

    /* reject buffers too small to hold both headers */
    if (buffer_len < sizeof(clim_bitmap_file_header_t) + sizeof(clim_bitmap_info_header_t))
        return CLIM_EC_INVALID_BITMAP_FILE;

    /* read the signature with memcpy instead of a pointer cast (no UB) */
    uint16_t signature;
    memcpy(&signature, buffer, sizeof signature);
    if (signature != 0x4D42)
        return CLIM_EC_INVALID_BITMAP_FILE;

    pctx->format = CLIM_IMAGE_FORMAT_BMP;

    const uint8_t* save_point = buffer;

    /* read the full 32-bit pixel-data offset (was: one byte only) */
    uint32_t pixel_offset;
    memcpy(&pixel_offset,
           save_point + offsetof(clim_bitmap_file_header_t, pixel_data_offset),
           sizeof pixel_offset);

    clim_bitmap_info_header_t info_header = { 0 };
    memcpy(&info_header,
           save_point + sizeof(clim_bitmap_file_header_t),
           sizeof(clim_bitmap_info_header_t));

    pctx->data.width = (uint32_t)(CLIM_BMP_ABS(info_header.width));
    pctx->data.height = (uint32_t)(CLIM_BMP_ABS(info_header.height));
    pctx->data.bytes_per_pixel = (uint8_t) (info_header.bits_per_pixel >> 3U);

    const uint8_t bpp = pctx->data.bytes_per_pixel;
    if (bpp != 3 && bpp != 4)
        return CLIM_EC_UNSUPPORTED_BITMAP_BPP;

    buffer = save_point + pixel_offset;

    const size_t rowsize = pctx->data.width * pctx->data.bytes_per_pixel;
    const uint8_t padd = ((4U - (rowsize & 3U)) & 3U);  /* BMP rows are 4-byte aligned */
    const uint8_t mem_bpp = sizeof(clim_pixelcolor_t);
    const size_t mem_len = pctx->data.height * pctx->data.width * mem_bpp;
    uint8_t* img_pixels = (uint8_t *) clim_mem_alloc(mem_len, false);

    const bool is_32_bits_per_pixel = (bpp == 4U);
    uint32_t width = pctx->data.width;
    uint32_t height = pctx->data.height;

    /* copy file BGR(A) bytes into the in-memory channel slots */
    for (uint32_t y = 0; y < height; ++y)
    {
        for (uint32_t x = 0; x < width; ++x)
        {
            img_pixels[mem_bpp * (y * width + x) + 3U] = *buffer++ & UINT8_MAX;
            img_pixels[mem_bpp * (y * width + x) + 2U] = *buffer++ & UINT8_MAX;
            img_pixels[mem_bpp * (y * width + x) + 1U] = *buffer++ & UINT8_MAX;
            img_pixels[mem_bpp * (y * width + x) + 0U] = is_32_bits_per_pixel ? (*buffer++ & UINT8_MAX) : (0xffu);
        }
        if (!is_32_bits_per_pixel)
            buffer += padd;  /* skip row padding in 24-bit files */
    }

    pctx->data.pixels = img_pixels;
    return CLIM_EC_SUCCESS;
}
/* Writes pctx's pixel data to `filepath` as an uncompressed 24- or 32-bit
 * BMP (headers + padded BGR(A) rows).  Returns CLIM_EC_SUCCESS, or an
 * error code on unsupported bpp / unopenable file.
 * NOTE(review): in the 32-bit path, clim_bgr2rgb is applied to
 * pctx->data.pixels in place even though pctx is const-qualified, and the
 * channel order is not swapped back afterwards — confirm this side effect
 * is intended.  fwrite/fputc return values are unchecked (see TODO). */
clim_errcode_t clim_img_bmp_write(const char* filepath, const clim_img_ctx_t* pctx)
{
    CLIM_ASSERT(pctx && filepath);

    if (pctx->data.bytes_per_pixel != 3 && pctx->data.bytes_per_pixel != 4)
        return CLIM_EC_UNSUPPORTED_BITMAP_BPP;

    // on-disk size: each row padded up to a 4-byte boundary
    const uint32_t rowsize = pctx->data.width * pctx->data.bytes_per_pixel;
    const uint8_t padd = ((4 - (rowsize & 3)) & 3);
    const size_t len = pctx->data.height * (rowsize + padd);

    clim_bitmap_file_header_t ctx_header_file =
    {
        .signature = 0x4D42,  // "BM"
        .filesize = (uint32_t)(len + sizeof(clim_bitmap_file_header_t) + sizeof(clim_bitmap_info_header_t)),
        .pixel_data_offset = (uint32_t)(sizeof(clim_bitmap_file_header_t) + sizeof(clim_bitmap_info_header_t))
    };

    clim_bitmap_info_header_t ctx_header_info =
    {
        .size = sizeof(clim_bitmap_info_header_t),
        .width = (int32_t)(pctx->data.width),
        .height = (int32_t)(pctx->data.height),
        .bits_per_pixel = (uint16_t)(pctx->data.bytes_per_pixel << 3),
        .img_size = (uint32_t)(len),
        .planes = 1U
    };

    FILE* file_image = fopen(filepath, "wb");
    if (!file_image)
        return CLIM_EC_CANNOT_WRITE_FILE;

    fwrite(&ctx_header_file, sizeof(ctx_header_file), 1, file_image);
    fwrite(&ctx_header_info, sizeof(ctx_header_info), 1, file_image);

    const uint8_t bpp = pctx->data.bytes_per_pixel;
    const uint8_t mem_bpp = sizeof(clim_pixelcolor_t);
    uint8_t* pixels = pctx->data.pixels;
    const uint32_t height = pctx->data.height;
    const uint32_t width = pctx->data.width;

    if (bpp == 4U)
    {
        // 32-bit: swap channel order in place, then dump the buffer whole
        // (rows are already 4-byte aligned, so padd is 0 here)
        clim_bgr2rgb(pixels, mem_bpp,
            pixels + (pctx->data.width * pctx->data.height * mem_bpp));
        // TODO(Garcia): Ensure return write size
        fwrite(pctx->data.pixels, len, sizeof(uint8_t), file_image);
    }
    else
    {
        // 24-bit: emit B, G, R per pixel plus row padding
        // (indices mirror the channel slots filled by clim_img_bmp_load)
        // #pragma omp parallel for
        for (uint32_t y = 0U; y < height; ++y)
        {
            for (uint32_t x = 0U; x < width; ++x)
            {
                fputc(pixels[mem_bpp * (y * width + x) + 3U], file_image); // B
                fputc(pixels[mem_bpp * (y * width + x) + 2U], file_image); // G
                fputc(pixels[mem_bpp * (y * width + x) + 1U], file_image); // R
            }
            fwrite(&(uint8_t){0x0}, sizeof(uint8_t), padd, file_image);
        }
    }

    clim_fclose(file_image);
    return CLIM_EC_SUCCESS;
}
#endif
|
omp_mergesort.h | #ifndef OMP_MERGESORT_H_QHRJTEP9
#define OMP_MERGESORT_H_QHRJTEP9
/*
OpenMP recursive merge sort
Copyright (C) 2011 Atanas Radenski
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <omp.h>
#include <vector>
#include <algorithm>
const int Small = 32;
template <typename T, typename Cmp>
void merge(T arr[], int size, T temp[], Cmp cmp);
template <typename T, typename Cmp>
void insertion_sort(T arr[], int size, Cmp cmp);
template <typename T, typename Cmp>
void mergesort_serial(T a[], int size, T temp[], Cmp cmp);
template <typename T, typename Cmp>
void mergesort_parallel_omp(T a[], int size, T temp[], int threads, Cmp cmp);
// Entry point: sorts a[0..size) with comparator cmp using up to `threads`
// OpenMP threads.  `temp` must point to scratch storage of at least `size`
// elements; it is clobbered.
template <typename T, typename Cmp>
void mergesort(T a[], int size, T temp[], int threads, Cmp cmp) {
    // Enable nested parallelism, if available — each recursion level of
    // mergesort_parallel_omp opens its own parallel sections region.
    omp_set_nested(1);
    // Parallel mergesort
    mergesort_parallel_omp<T, Cmp>(a, size, temp, threads, cmp);
}
// OpenMP merge sort with given number of threads
// OpenMP merge sort with given number of threads: recursively splits the
// thread budget in half across the two sub-sorts; once a branch is down to
// one thread it falls back to the serial sort.
// NOTE(review): the `assert` in the else branch needs <cassert>, which is
// not among the headers visible at the top of this file — confirm it is
// pulled in transitively.
template <typename T, typename Cmp>
void mergesort_parallel_omp(T a[], int size, T temp[], int threads,
                            Cmp cmp) {
    if (threads == 1) {
        // printf("Thread %d begins serial merge sort\n",
        // omp_get_thread_num());
        mergesort_serial<T, Cmp>(a, size, temp, cmp);
    } else if (threads > 1) {
        // TODO remove num_threads?
        #pragma omp parallel sections num_threads(2)
        {
            // printf("Thread %d begins recursive section\n",
            // omp_get_thread_num());
            #pragma omp section
            { // printf("Thread %d begins recursive call\n",
              // omp_get_thread_num());
                // left half, with half the thread budget
                mergesort_parallel_omp<T, Cmp>(a, size / 2, temp, threads / 2,
                                               cmp);
            }
            #pragma omp section
            { // printf("Thread %d begins recursive call\n",
              // omp_get_thread_num());
                // right half, with the remaining threads
                mergesort_parallel_omp<T, Cmp>(a + size / 2, size - size / 2,
                                               temp + size / 2,
                                               threads - threads / 2, cmp);
            }
            // The above use of temp + size/2 is an essential change from the
            // serial version
        }
        // Thread allocation is implementation dependent
        // Some threads can execute multiple sections while others are idle
        // Merge the two sorted sub-arrays through temp
        merge<T, Cmp>(a, size, temp, cmp);
    } else {
        // threads <= 0 is a caller error
        assert(false);
        return;
    }
}
// Sequential merge sort of a[0 .. size); temp is scratch space at least
// `size` elements long.
template <typename T, typename Cmp>
void mergesort_serial(T a[], int size, T temp[], Cmp cmp) {
    // Insertion sort wins on tiny inputs; cut over below the threshold.
    if (size < Small) {
        insertion_sort<T, Cmp>(a, size, cmp);
        return;
    }
    const int half = size / 2;
    mergesort_serial<T, Cmp>(a, half, temp, cmp);
    mergesort_serial<T, Cmp>(a + half, size - half, temp, cmp);
    // Reusing the same temp for both recursive calls is fine serially, but
    // would race in the OpenMP parallel variant (which offsets temp instead).
    merge<T, Cmp>(a, size, temp, cmp);
}
// Merge the two sorted halves arr[0 .. size/2) and arr[size/2 .. size)
// through the scratch buffer temp, then copy the result back into arr.
template <typename T, typename Cmp>
void merge(T arr[], int size, T temp[], Cmp cmp) {
    const int mid = size / 2;
    int left = 0;     // cursor into the first half
    int right = mid;  // cursor into the second half
    int out = 0;      // cursor into temp

    // Take the element that compares first while both halves are non-empty.
    while (left < mid && right < size) {
        if (cmp(arr[left], arr[right]))
            temp[out++] = arr[left++];
        else
            temp[out++] = arr[right++];
    }
    // Drain whichever half still has elements remaining.
    while (left < mid)
        temp[out++] = arr[left++];
    while (right < size)
        temp[out++] = arr[right++];

    // Copy the merged result back into the main array.
    std::copy(temp, temp + size, arr);
}
// Sort arr[0 .. size) in place using cmp as the "comes before" predicate.
// Classic insertion sort; intended for small sub-arrays where it beats the
// recursive merge sort.
template <typename T, typename Cmp>
void insertion_sort(T arr[], int size, Cmp cmp) {
    for (int i = 0; i < size; i++) {
        auto key = arr[i];
        // Shift elements that do not compare before key one slot to the
        // right, then drop key into the resulting gap.
        int j = i - 1;
        while (j >= 0 && !cmp(arr[j], key)) {
            arr[j + 1] = arr[j];
            j--;
        }
        arr[j + 1] = key;
    }
}
#endif /* end of include guard: OMP_MERGESORT_H_QHRJTEP9 */
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
/// The kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, true if the tail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated
/// integer. 7 bits because it is the minimal number of bits to represent a
/// value from 0 to 64 (the size of the tail-allocated integer).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
/// tail-allocated APValue.
unsigned HasCleanup : 1;
/// True if this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
//
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArrayOrMatrixSubscriptExprBitfields {
friend class ArraySubscriptExpr;
friend class MatrixSubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// True if the call expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 3 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait. According to [implimits]
/// 8 bits would be enough, but we require (and test for) at least 16 bits
/// to mirror FunctionType.
unsigned NumArgs;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class LambdaExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class LambdaExpr;
unsigned : NumExprBits;
/// The default capture kind, which is a value of type
/// LambdaCaptureDefault.
unsigned CaptureDefault : 2;
/// Whether this lambda had an explicit parameter list vs. an
/// implicit (and empty) parameter list.
unsigned ExplicitParams : 1;
/// Whether this lambda had the result type explicitly specified.
unsigned ExplicitResultType : 1;
/// The number of captures.
unsigned NumCaptures : 16;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
LambdaExprBitfields LambdaExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
/// The likelihood of a branch being taken.
enum Likelihood {
LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute.
LH_None, ///< No attribute set or branches of the IfStmt have
///< the same attribute.
LH_Likely ///< Branch has the [[likely]] attribute.
};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
  /// Human-readable name of the dynamic class, for diagnostics/dumps.
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \returns the likelihood of a statement.
  static Likelihood getLikelihood(const Stmt *S);

  /// \returns the likelihood of the 'then' branch of an 'if' statement. The
  /// 'else' branch is required to determine whether both branches specify the
  /// same likelihood, which affects the result.
  static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else);

  /// \returns whether the likelihood of the branches of an if statement are
  /// conflicting. When the first element is \c true there's a conflict and
  /// the Attr's are the conflicting attributes of the Then and Else Stmt.
  static std::tuple<bool, const Attr *, const Attr *>
  determineLikelihoodConflict(const Stmt *Then, const Stmt *Else);

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(raw_ostream &OS, const ASTContext &Context) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  /// Strip off label-like statements (labels, cases, ...) wrapping this one.
  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  // Const overload forwards to the non-const children() of the subclass.
  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  // Iterators over the declarations themselves (as opposed to the
  // initializer expressions visited by children()).
  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  // The semicolon location is stored in the shared Stmt bit-fields.
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  /// True if this ";" was immediately preceded by an empty macro expansion.
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement spans exactly the ";" token.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // The body statements are stored as a trailing Stmt* array.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  // Intrusive singly-linked list of the cases of the enclosing switch.
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined inline after CaseStmt/DefaultStmt; dispatches on the subclass.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing Stmt* array; the RHS slot (if present) sits
  // between the LHS and the substatement.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  // RHS accessors return null unless this is a GNU range case.
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DefaultStmt - Represents the "default:" label of a switch statement.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};
SourceLocation SwitchCase::getEndLoc() const {
  // A SwitchCase is always exactly one of the two concrete subclasses;
  // forward to whichever one this is.
  if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
Stmt *SwitchCase::getSubStmt() {
  // Dispatch to the concrete subclass's substatement accessor.
  if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  /// Return the contained Expr, looking through label-like wrappers.
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  // ValueStmt subclasses occupy a contiguous range of StmtClass constants.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};
/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  // The identifier location is stored in the shared Stmt bit-fields.
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // The Attr* array is stored as trailing objects after the node.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //    Always present.
  //
  // * A "Stmt *" for the else statement.
  //    Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //    Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing Stmt* array; the optional init and condition
  // variable slots shift the later slots when present.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
         SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        SourceLocation LPL, SourceLocation RPL, Stmt *Then,
                        SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }

  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // getElse() returns null when no else storage was allocated.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  // Total number of trailing "Stmt *" slots: the two mandatory slots
  // (condition and body) plus one per optional slot that is present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Slot indices into the trailing-object array. Each optional slot that
  // is present shifts every subsequent offset by one, so these must be
  // computed dynamically from the storage flags.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition slot stores a "Stmt *" but always holds an "Expr *",
  // hence the reinterpret_cast in the accessors below.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // Returns null when no init-statement storage was allocated at creation.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // Head of the intrusive singly-linked list of case/default statements.
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the case list; cases therefore end up in reverse
  // source order, which callers of getSwitchCaseList() must expect.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // An empty body can occur during parsing; fall back to the condition.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc, RParenLoc;

  // Slot indices into the trailing-object array. The optional condition
  // variable slot, when present, shifts the condition and body by one.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Total number of trailing "Stmt *" slots actually allocated.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition slot stores a "Stmt *" but always holds an "Expr *".
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Indices into SubExprs for the two fixed children.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;  // Location of the 'while' keyword.
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a do/while statement from its body, condition and the
  /// locations of the 'do', 'while' and closing ')' tokens.
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setBody(Body);
    setCond(Cond);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The condition slot stores a "Stmt *" but always holds an "Expr *".
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // The statement spans from 'do' to the closing ')' after the condition.
  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators over the body and condition.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Indices into SubExprs. CONDVAR holds the faux DeclStmt for a variable
  // declared in the condition, if any.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  // The condition and increment slots store "Stmt *" but always hold
  // expressions.
  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }

  Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // The statement spans from 'for' through the end of its body.
  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators over all five child slots (null entries included).
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;        // The label being jumped to.
  SourceLocation LabelLoc; // Location of the label name in the goto.

public:
  /// Build a goto from the target label and the locations of the 'goto'
  /// keyword and the label name.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc; // Location of the '*' before the target expression.
  Stmt *Target;           // The target address expression (always an Expr *).

public:
  /// Build an indirect goto from its target expression and the locations
  /// of the 'goto' keyword and the '*' token.
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  // Target is stored as a "Stmt *" but always holds an "Expr *".
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the single child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Build a continue statement at the given 'continue' keyword location.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  // A continue is a single token; begin and end coincide.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Build a break statement at the given 'break' keyword location.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  // A break is a single token; begin and end coincide.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  // Zero or one trailing "const VarDecl *" slots.
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // RetExpr is stored as a "Stmt *" but always holds an "Expr *" (or null
  // for a bare "return;").
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the optional return expression is the only child.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Shared flat array of operand expressions: outputs first, then inputs
  // (subclasses may append further entries, e.g. GCCAsmStmt labels).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Subclasses provide the real source range; the base returns invalid
  // locations.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators. Inputs live in Exprs[NumOutputs .. NumOutputs+NumInputs).
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators. Outputs live in Exprs[0 .. NumOutputs).
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Constraints and Names are indexed in parallel with Exprs: outputs first,
  // then inputs (Names additionally covers labels).
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  // Outputs occupy indices [0, NumOutputs) of Names/Constraints/Exprs.
  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs occupy indices [NumOutputs, NumOutputs + NumInputs).
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  // Labels (for "asm goto") occupy the tail of Names/Exprs, after inputs.
  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  // Constraints is indexed outputs-first, then inputs, parallel to Exprs.
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // An invalid LBraceLoc means the braceless "__asm mov eax, 1" form.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents a structured-exception-handling __except handler.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  // Children[FILTER_EXPR] is the filter expression,
  // Children[BLOCK] is the handler body.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);

  /// Build an empty __except statement (used when deserializing).
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  /// The filter expression controlling whether this handler runs.
  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  /// The handler's compound-statement body.
  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a structured-exception-handling __finally block.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);

  /// Build an empty __finally statement (used when deserializing).
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  /// The compound-statement body of the __finally.
  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a __try block with its __except or __finally handler.
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  // True when spelled 'try', false when spelled '__try'.
  bool IsCXXTry;
  SourceLocation TryLoc;
  // Children[TRY] is the guarded block; Children[HANDLER] is either a
  // SEHExceptStmt or a SEHFinallyStmt.
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  /// Build an empty __try statement (used when deserializing).
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  // The statement is a single keyword; begin and end are the same location.
  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: __leave has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // The captured variable (null for a 'this' capture) with the capture
    // kind packed into the pointer's spare low bits.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage begins immediately after this object: NumCaptures
  // capture-initializer expressions followed by the captured statement
  // (at index NumCaptures).
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  // NOTE(review): returns a mutable iterator from a const member — relies
  // on getStoredCaptures() being const; kept as-is.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source extent is that of the captured statement itself.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
1886.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int row, col;

  /* Seed the input vector with multiples of pi. */
  for (row = 0; row < ny; row++)
    x[row] = row * M_PI;

  /* Fill the matrix with a deterministic pattern scaled by nx. */
  for (row = 0; row < nx; row++)
    for (col = 0; col < ny; col++)
      A[row][col] = ((DATA_TYPE) row * (col + 1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
                 DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int idx;

  /* Dump every element of the live-out vector; a newline is emitted
     every 20 elements (including before the first one, matching the
     PolyBench output convention). */
  for (idx = 0; idx < nx; idx++)
    {
      fprintf (stderr, DATA_PRINTF_MODIFIER, y[idx]);
      if (idx % 20 == 0)
        fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(x,NY,ny),
		 DATA_TYPE POLYBENCH_1D(y,NY,ny),
		 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;

#pragma scop
#pragma omp parallel num_threads(2)
  {
    /* Zero the output vector in parallel; the implicit barrier at the
       end of this worksharing loop guarantees y is fully initialized
       before any thread accumulates into it below. */
#pragma omp for schedule(static, 8)
    for (i = 0; i < _PB_NY; i++)
      y[i] = 0;
#pragma omp for private (j) schedule(static, 8)
    for (i = 0; i < _PB_NX; i++)
      {
	/* tmp[i] = (A * x)[i] — tmp[i] is private to the thread owning
	   iteration i, so no synchronization is needed here. */
	tmp[i] = 0;
	for (j = 0; j < _PB_NY; j++)
	  tmp[i] = tmp[i] + A[i][j] * x[j];
	/* y += A^T * tmp.  Every thread updates every y[j], so this
	   read-modify-write must be atomic: the original unsynchronized
	   update was a data race that silently lost contributions. */
	for (j = 0; j < _PB_NY; j++)
	  {
#pragma omp atomic
	    y[j] = y[j] + A[i][j] * tmp[i];
	  }
      }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Problem sizes are taken from the benchmark header. */
  int nx = NX;
  int ny = NY;

  /* Allocate the matrix and the three vectors used by the kernel. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);

  /* Fill A and x with deterministic data. */
  init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));

  /* Time the kernel, including the call and return. */
  polybench_start_instruments;
  kernel_atax (nx, ny,
	       POLYBENCH_ARRAY(A),
	       POLYBENCH_ARRAY(x),
	       POLYBENCH_ARRAY(y),
	       POLYBENCH_ARRAY(tmp));
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination: the entire live-out vector must be
     scanned by the printing routine. */
  polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));

  /* Release all allocations (reverse order of declaration). */
  POLYBENCH_FREE_ARRAY(tmp);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(A);

  return 0;
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
/*
  A pixel held in double precision, used for error-free color accumulation
  and quantization arithmetic.
*/
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;
/*
  One node of the color-description octree.  Each node owns up to 16
  children (4 bits: R, G, B and optionally alpha — see ColorToNodeId()).
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  /* Count of pixels classified exactly at this node (n2 in the header
     comment's terminology). */
  MagickSizeType
    number_unique;

  /* Component sums of the pixels classified at this node. */
  DoublePixelPacket
    total_color;

  /* Squared-distance error used to select nodes for pruning. */
  double
    quantize_error;

  size_t
    color_number,
    id,
    level;
} NodeInfo;
/*
  A singly-linked list of node arenas; nodes are allocated in batches
  and the batches chained together for bulk release.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;
/*
  Working state for color quantization: the octree, its allocation pools,
  pruning thresholds, the current search target, and dithering state.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,
    maximum_colors;

  /* Colormap slot reserved for fully transparent pixels (-1 if none). */
  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  /* Color currently being matched by ClosestColor(). */
  DoublePixelPacket
    target;

  double
    distance,
    pruning_threshold,
    next_threshold;

  /* Node accounting for the arena allocator. */
  size_t
    nodes,
    free_nodes,
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;

  /* Error-diffusion queue and weights used while dithering. */
  DoublePixelPacket
    error[ErrorQueueLength];

  double
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  /* When true, colors are matched with alpha premultiplied in. */
  MagickBooleanType
    associate_alpha;

  /* Current pixel position while traversing the image. */
  ssize_t
    x,
    y;

  size_t
    depth;

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *dither_option;

  QuantizeInfo
    *info;

  /*
    Allocate the structure and populate it with library defaults.
  */
  info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*info));
  GetQuantizeInfo(info);
  if (image_info == (ImageInfo *) NULL)
    return(info);
  /*
    Derive dither settings from the image info; an explicit "dither"
    image option overrides the boolean dither flag.
  */
  if (image_info->dither == MagickFalse)
    info->dither_method=NoDitherMethod;
  else
    info->dither_method=RiemersmaDitherMethod;
  dither_option=GetImageOption(image_info,"dither");
  if (dither_option != (const char *) NULL)
    info->dither_method=(DitherMethod) ParseCommandOption(MagickDitherOptions,
      MagickFalse,dither_option);
  info->measure_error=image_info->verbose;
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    Load a pixel into double precision, premultiplying the color channels
    by alpha when alpha association is enabled and the pixel is not opaque.
  */
  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      /* Straight copy: association disabled, or fully opaque pixel. */
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
    }
  else
    {
      double
        scale;

      /* Premultiply each color channel by the normalized alpha. */
      scale=(double) (QuantumScale*GetPixelAlpha(image,pixel));
      alpha_pixel->red=scale*GetPixelRed(image,pixel);
      alpha_pixel->green=scale*GetPixelGreen(image,pixel);
      alpha_pixel->blue=scale*GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
    }
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    PixelInfo variant of AssociateAlphaPixel(): load into double precision,
    premultiplying color channels by alpha when association is enabled and
    the pixel is not opaque.
  */
  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      /* Straight copy: association disabled, or fully opaque pixel. */
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
    }
  else
    {
      double
        scale;

      /* Premultiply each color channel by the normalized alpha. */
      scale=(double) (QuantumScale*pixel->alpha);
      alpha_pixel->red=scale*pixel->red;
      alpha_pixel->green=scale*pixel->green;
      alpha_pixel->blue=scale*pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
    }
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    node_id;

  /*
    Select the child slot at tree depth `index`: one bit per channel,
    taken from bit `index` of each 8-bit scaled component value.
  */
  node_id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) &
    0x01);
  node_id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->green)) >>
    index) & 0x01) << 1);
  node_id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->blue)) >>
    index) & 0x01) << 2);
  /* A fourth bit is added when alpha participates in classification. */
  if (cube_info->associate_alpha != MagickFalse)
    node_id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >>
      index) & 0x01) << 3);
  return(node_id);
}
/*
  AssignImageColors() builds the image colormap from the classified color
  tree in cube_info, then maps every pixel to its closest colormap entry,
  either by dithering or by a direct (optionally parallel) tree lookup.
  Returns MagickTrue; colormap allocation failure throws a binary exception.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;  /* saved so the original space can be restored below */
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() increments image->colors as it emits entries. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        /* Under OpenMP we cannot break; failed rows just skip their work. */
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Per-thread copy: ClosestColor() mutates target/distance fields. */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
            First, run-length encode: count adjacent identical pixels so the
            tree search is done once per run.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Seed distance above the 4-channel maximum so any entry wins. */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          /* Assign the chosen colormap entry to every pixel in the run. */
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the 2-entry colormap to pure black/white,
        ordered by the relative luma of the two entries.
      */
      intensity=0.0;
      if ((image->colors > 1) &&
          (GetPixelInfoLuma(image->colormap+0) >
           GetPixelInfoLuma(image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /* Restore the caller's colorspace if quantization changed it. */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each such
% node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    blend;

  /*
    Carry alpha through quantization only when the image blends alpha and
    the target is not a 2-color grayscale (monochrome) palette.
  */
  blend=MagickFalse;
  if (image->alpha_trait == BlendPixelTrait)
    blend=MagickTrue;
  if (cube_info->quantize_info->number_colors == 2)
    if ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
        (cube_info->quantize_info->colorspace == GRAYColorspace))
      blend=MagickFalse;
  cube_info->associate_alpha=blend;
}
/*
  ClassifyImageColors() builds the color-description tree for the image.
  The first row loop classifies at full MaxTreeDepth until the color count
  exceeds cube_info->maximum_colors; the tree is then pruned and the second
  loop continues at the (possibly reduced) cube_info->depth.  Returns
  MagickFalse if a pixel row could not be read.

  Fixes vs. prior revision: the second loop's allocation-failure message now
  uses the same "`%s'" quoting as the first loop's, and both NaN guards use
  the explicit `!= MagickFalse` comparison used elsewhere in this file.
*/
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace,exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;  /* stays 0 when alpha is not associated */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.  Run-length
        encode adjacent identical pixels first so each run descends once.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /* Track the center of the shrinking sub-cube at this level. */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* Too many colors: prune and fall through to the capped loop. */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify the remaining rows, descending only to cube_info->depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Start from a default-initialized structure, then overlay the caller's
    settings if one was supplied; a NULL argument yields the defaults.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        pixel;

      register double
        alpha,
        beta,
        distance;

      register DoublePixelPacket
        *magick_restrict q;

      register PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".  Candidate p is this node's
        colormap entry; q is the target color in cube_info->target.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      /* When alpha is associated, compare alpha-weighted channel values. */
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      /*
        Accumulate squared distance one channel at a time, bailing out as
        soon as the running total exceeds the best distance found so far.
      */
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      /* New best match: record distance and colormap slot. */
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    compress_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize at full tree depth with the current color count; this
    collapses duplicate and unused colormap entries without changing pixels.
  */
  GetQuantizeInfo(&compress_info);
  compress_info.number_colors=image->colors;
  compress_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&compress_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% of unique colors is not zero. DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
        total_color holds QuantumScale-normalized sums, so the mean is
        (sum/number_unique)*QuantumRange; alpha is the reciprocal count.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Partially transparent entry: the sums were accumulated with
                alpha-associated values, so divide by the mean alpha (gamma)
                to recover the un-premultiplied color.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* Remember the most-populated transparent entry's index. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
/*
  DestroyCubeInfo() releases all storage owned by a CubeInfo: the banks of
  tree nodes on the node queue, the virtual-memory cache, the quantize info,
  and finally the structure itself.  The previous do-while dereferenced
  cube_info->node_queue before checking it; a while loop makes an empty
  queue safe instead of a NULL dereference.
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage.
  */
  while (cube_info->node_queue != (Nodes *) NULL)
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  }
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
/*
  DestroyQuantizeInfo() invalidates the signature and frees a QuantizeInfo;
  always returns NULL.  Asserts now precede the trace log, matching the
  validate-then-log order used by the other public entry points in this
  file (the old order logged before validating the pointer).
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Poison the signature so a later use-after-free trips the assert. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyPixelThreadSet() frees the per-thread error-diffusion buffers and
  the pointer table itself; always returns NULL.  The thread-resource limit
  is loop-invariant, so it is queried once instead of on every iteration.
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(pixels != (DoublePixelPacket **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  /*
    Pack the top (8-CacheShift) bits of each channel into one index into
    the closest-color cache.
  */
  offset=(ssize_t) RedShift(ScaleQuantumToChar(ClampPixel(pixel->red)));
  offset|=(ssize_t) GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green)));
  offset|=(ssize_t) BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=(ssize_t) AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
/*
  FloydSteinbergDither() assigns each pixel to the closest colormap entry
  while diffusing the per-pixel quantization error to the unprocessed
  neighbors with the classic 7/16, 5/16, 3/16, 1/16 weights, scanning rows
  in serpentine (boustrophedon) order.  Returns MagickFalse only if the
  scratch buffers cannot be allocated.
*/
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  /* "dither:diffusion-amount" artifact scales how much error is diffused. */
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    The row loop is serial (no omp pragma here): each row consumes the
    error buffer written by the previous row.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /* Two row buffers, ping-ponged on row parity. */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /* v is the scan direction: even rows left-to-right, odd right-to-left. */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      /* u is the actual column visited for this serpentine step. */
      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      if (x > 0)
        {
          /* 7/16 of the error from the previously visited pixel. */
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              /* 1/16 from the previous row, one step ahead. */
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          /* 5/16 from directly above. */
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              /* 3/16 from the previous row, one step behind. */
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /* Consult the closest-color cache; -1 means not yet computed. */
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      /*
        NOTE(review): SyncCacheViewAuthenticPixels and the progress monitor
        run once per pixel here, not once per row — looks like they could
        move after the x loop; confirm against upstream before changing.
      */
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  return(MagickTrue);
}
static MagickBooleanType
  RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
    ExceptionInfo *);

/*
  Riemersma() recursively generates the traversal order for Riemersma
  dithering, emitting one RiemersmaDither() step per move.  The four
  gravity values encode the current orientation of the curve; the exact
  sequence of recursive calls and dither steps defines the path and must
  not be reordered.  (The recursion pattern matches a Hilbert-style
  space-filling curve — see Riemersma's published algorithm.)
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  /* Base case: at level 1, emit three dither steps for this cell. */
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    /* Recursive case: four rotated sub-curves joined by dither steps. */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
/*
  RiemersmaDither() dithers the single pixel at the color cube's current
  (x,y) cursor: it adds the weighted errors from the error queue to the
  pixel, assigns the closest colormap entry, records the new quantization
  error at the tail of the queue, and finally advances the cursor one step
  in the given direction.  Returns MagickFalse on a pixel-cache failure or
  when the progress monitor requests cancellation.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  /* Only dither when the curve cursor lies inside the image bounds. */
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /* Add the weighted contribution of each queued error term. */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /* Consult the color cache; a negative entry means "not yet known". */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          /* Sentinel distance larger than any possible squared distance. */
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue: shift the
        queue down one slot and append this pixel's residual error.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Step the curve cursor one pixel in the requested direction. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
/*
  DitherImage() distributes quantization error along a Hilbert curve when
  Riemersma dithering is requested; any other dither method is routed to
  the Floyd-Steinberg implementation.  Returns the dither status.
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    extent;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Reset the error queue and the curve cursor.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Choose a curve order whose extent covers the larger image dimension.
  */
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (extent != 0)
  {
    extent>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /* Flush the final pixel still pending in the error queue. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
/*
  GetCubeInfo() allocates and initializes the color cube: a zeroed CubeInfo,
  a root node, a clone of the caller's quantize settings and -- when
  dithering is enabled -- a color cache plus a normalized, exponentially
  decaying error-weight table.  Returns NULL on allocation failure.
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /* Clamp the requested tree depth to [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    /* NOTE(review): cube_info leaks on this path -- confirm and free. */
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    /* NOTE(review): cube_info, its node queue and cloned quantize_info leak
       on this path -- confirm and release them. */
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache: -1 marks "colormap entry not yet computed".
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*
    length);
  /*
    Distribute weights along a curve of exponential decay: the most recent
    error (highest index) receives the largest weight.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  /* Fold any rounding residue into the first weight so the sum is 1.0. */
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the color cube tree at which the node resides.
%
*/
/*
  GetNodeInfo() returns a zeroed node from the cube's node pool, allocating
  a fresh pool of NodesInAList nodes when the current one is exhausted.
  Returns NULL on allocation failure.
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /* Release the queue header; returning without this leaked it. */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /* Hand out the next node from the pool and preset its fields. */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /* Error is only measurable against a colormap (PseudoClass) image. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* Three channels (R,G,B) per pixel.
     NOTE(review): area is 0 for a zero-dimension image, which would divide
     by zero below -- confirm callers guarantee nonzero dimensions. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      /* Weight each side by its alpha when the image blends alpha. */
      if (image->alpha_trait == BlendPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /* Accumulate per-channel |original - colormap| distances. */
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Publish the three normalized error statistics on the image. */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
/*
  GetQuantizeInfo() initializes a QuantizeInfo structure to its defaults:
  256 colors, Riemersma dithering, undefined colorspace, no error measure.
  The parameter is validated before any work is done, matching the
  assert-first convention of the other public entry points in this file.
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
/* Snap a channel value to the nearest of `levels` evenly spaced levels. */
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Posterize the colormap entries of a palette image first.
    NOTE(review): progress and status are declared shared here before being
    initialized; neither is referenced in this loop body, but confirm.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Posterize only the channels marked for update. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Finish by quantizing to at most levels^3 colors with the chosen dither.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneChild() removes the subtree rooted at node_info from the color cube:
  children are pruned first, then this node's color statistics are folded
  into its parent and the node is detached.
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    id;

  size_t
    number_children;

  /*
    Prune each child subtree before touching this node.
  */
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
  {
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[id]);
  }
  /*
    Merge color statistics into parent and detach this node.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.red+=node_info->total_color.red;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneLevel() walks the tree depth-first and prunes every node that sits
  at the cube's maximum depth, merging its statistics into its parent.
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Visit children first so pruning proceeds from the leaves upward.
  */
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
  {
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[id]);
  }
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneToCubeDepth() walks the tree depth-first and prunes every node that
  lies deeper than cube_info->depth, merging its statistics upward.
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Visit children first so pruning proceeds from the leaves upward.
  */
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
  {
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[id]);
  }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Clamp the requested palette size; 0 means "use the maximum". */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /* Gray images without blended alpha take the grayscale shortcut
     (presumably a direct gray colormap -- confirm SetGrayscaleImage). */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  /* Already palettized within budget: only a colorspace change remains. */
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace,
          exception);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Shallower trees suffice when dithering masks banding or when an
         alpha channel doubles each node's fan-out. */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  QuantizeImages() classifies the colors of every image in the list into one
  shared color cube, reduces the cube to the color budget, then assigns the
  resulting palette to each image.  Fix: the per-image progress monitor is
  now restored *before* any early exit, so a classification or assignment
  failure no longer leaves an image with its monitor cleared.
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /* Clamp the requested palette size; 0 means "use the maximum". */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    /*
      Suspend the per-image progress monitor during classification and
      restore it before any early exit.
    */
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    if (status == MagickFalse)
      break;
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        if (status == MagickFalse)
          break;
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
/*
  QuantizeErrorFlatten() copies the tree's quantization errors into a flat
  array in pre-order: this node's error is written at `offset`, followed
  immediately by each child's subtree.  Writing stops once `offset` reaches
  the cube's node count.  Returns the number of entries written.
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  register ssize_t
    id;

  size_t
    count,
    number_children;

  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
  {
    if (node_info->child[id] != (NodeInfo *) NULL)
      count+=QuantizeErrorFlatten(cube_info,node_info->child[id],offset+count,
        quantize_error);
  }
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Reduce() prunes, from the leaves upward, every node whose quantization
  error is at or below the cube's pruning threshold.  Surviving nodes that
  define a unique color are counted, and the smallest surviving error is
  recorded as the next pruning threshold.
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Visit children first so pruning proceeds from the leaves upward.
  */
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (id=0; id < (ssize_t) number_children; id++)
  {
    if (node_info->child[id] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[id]);
  }
  if (node_info->quantize_error > cube_info->pruning_threshold)
    {
      /*
        Node survives: count it and track the minimum surviving error.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
  else
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  qsort() comparator ordering quantization errors ascending.

  The near-equality test must run BEFORE the greater-than test.  In the
  previous ordering, *p exceeding *q by less than MagickEpsilon returned 1
  while the mirrored call returned 0 -- an asymmetric comparator, which
  violates the consistent-ordering contract qsort() requires (undefined
  behavior per the C standard).
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  const double
    *p,
    *q;

  p=(const double *) error_p;
  q=(const double *) error_q;
  /* treat values within MagickEpsilon as equal, symmetrically */
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  if (*p > *q)
    return(1);
  return(-1);
}
/*
  Repeatedly prune the color-description tree until at most
  cube_info->maximum_colors nodes remain with n2 > 0 (see the algorithm
  notes above).  Reports progress via SetImageProgress() and stops early
  if the progress monitor cancels.
*/
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: flatten all
        node errors into an array, sort it ascending, and pick as the first
        pruning threshold the error value that would leave roughly 110% of
        the requested maximum number of colors standing.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
      /* on allocation failure we simply start pruning from threshold 0.0 */
    }
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    /*
      Prune all nodes with E <= pruning_threshold; per the algorithm notes
      above, Reduce() recounts cube_info->colors and raises next_threshold
      to the minimum E among the surviving nodes for the next pass.
    */
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify the reference image's colors into the cube.
  */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign each pixel of 'image' the closest classified color.  (The
        previous comment here repeated "Classify image colors from the
        reference image", which describes the call above, not this one.)
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign the closest classified color to every image in the list,
        stopping at the first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;  /* NOTE(review): redundant; image was set to images above */
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator ordering PixelInfo entries by grayscale intensity.

  The previous implementation returned (int) of a double difference.  That
  cast is undefined behavior when the difference exceeds the range of int,
  and it truncates fractional differences -- two entries whose intensities
  differ by less than 1.0 compared as equal.  Returning the sign of the
  difference avoids both problems and keeps the comparator consistent.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  double
    intensity;

  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Convert 'image' to a PseudoClass grayscale image: build a colormap of
  the distinct gray levels, sort it by intensity, merge equivalent
  entries, and rewrite every pixel's index accordingly.
*/
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  register ssize_t
    i;

  size_t
    extent;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /* map table large enough for any ScaleQuantumToMap() result or colormap */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass source: discover the distinct gray levels.  The byte
        fill of -1 makes every ssize_t slot negative ("unassigned").
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          /* gray level: red channel stands in for intensity here */
          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              /* re-check inside the critical section: another thread may
                 have assigned this intensity meanwhile */
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
                }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity and merge equivalent entries.  The
    alpha field temporarily stores each entry's pre-sort index so the
    old-index -> new-index mapping survives the qsort().
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    /* old index (stashed in alpha) now maps to compacted index j */
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  status=MagickTrue;
  /*
    Rewrite every pixel's index through the old->new index mapping.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
debug_test_system.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2015, Knut Reinert, FU Berlin
// Copyright (c) 2013 NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// The SeqAn testing infrastructure. Based on ideas from the OpenMS
// "ClassTest.h".
// ==========================================================================
// TODO(holtgrew): This could use some cleanup.
// SEQAN_NO_GENERATED_FORWARDS
#ifndef SEQAN_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#define SEQAN_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#include <iostream> // stdout, stderr
#include <iomanip>
#include <cstring> // strrpos
#include <cstdlib> // exit()
#include <cstdio>
#include <cstdarg> // va_start, va_list, va_end
#include <algorithm> // min()
#include <set>
#include <vector>
#include <string>
#include <typeinfo>
#ifdef PLATFORM_WINDOWS
#include <Windows.h> // DeleteFile()
#else // #ifdef PLATFORM_WINDOWS
#include <unistd.h> // unlink()
#include <sys/stat.h> // mkdir()
#include <dirent.h> // DIR
#if SEQAN_HAS_EXECINFO
#include <execinfo.h> // backtrace(), backtrace_symbols()
#endif // #if SEQAN_HAS_EXECINFO
#include <cxxabi.h> // __cxa_demangle()
#include <signal.h>
#endif // #ifdef PLATFORM_WINDOWS
// ============================================================================
// Classes
// ============================================================================
// ----------------------------------------------------------------------------
// Class Demangler
// ----------------------------------------------------------------------------
// Holds the name of a given C++ type T.
// NOTE(esiragusa): this class could become a subclass of CStyle String...
namespace seqan {
// Demangler holds a human-readable name for the type of T.
//
// On PLATFORM_GCC, data_begin points to a malloc()'d string produced by
// abi::__cxa_demangle() and is released in the destructor; on other
// platforms it aliases the storage returned by typeid(t).name() and is
// never freed.
//
// NOTE(review): on PLATFORM_GCC this struct owns heap memory but relies
// on the implicitly generated copy constructor/assignment -- copying a
// Demangler would double-free data_begin.  Confirm instances are never
// copied, or add rule-of-three members.
template <typename T>
struct Demangler
{
#ifdef PLATFORM_GCC
    char *data_begin;        // owned: malloc()'d by __cxa_demangle()
#else
    const char *data_begin;  // borrowed from typeid(); do not free
#endif

    // Demangle the name of T itself.  Materializes a temporary T, so T
    // must be default-constructible.
    Demangler()
    {
        T t;
        _demangle(*this, t);
    }

    // Demangle the type name of the given object.
    Demangler(T const & t)
    {
        _demangle(*this, t);
    }

    ~Demangler()
    {
#ifdef PLATFORM_GCC
        free(data_begin);
#endif
    }
};
// ============================================================================
// Functions
// ============================================================================
// ----------------------------------------------------------------------------
// Function _demangle(Demangler)
// ----------------------------------------------------------------------------
// Fill me.data_begin with the (de)mangled name of t's type.
//
// On PLATFORM_GCC, abi::__cxa_demangle() malloc()s the returned buffer;
// ownership passes to the Demangler, whose destructor frees it.  On other
// platforms the raw typeid() name is used as-is.
template <typename T>
inline void _demangle(Demangler<T> & me, T const & t)
{
#ifdef PLATFORM_GCC
    // status is not inspected; on failure __cxa_demangle returns NULL,
    // leaving data_begin NULL (free(NULL) in the dtor is a no-op).
    int status;
    me.data_begin = abi::__cxa_demangle(typeid(t).name(), NULL, NULL, &status);
#else
    me.data_begin = typeid(t).name();
#endif
}
// ----------------------------------------------------------------------------
// Function toCString(Demangler)
// ----------------------------------------------------------------------------
// Return the (possibly demangled) type name held by the Demangler as a
// NUL-terminated C string.  The returned pointer is owned by 'me' and is
// only valid for the lifetime of 'me'.
template <typename T>
inline const char * toCString(Demangler<T> const & me)
{
    const char * typeName = me.data_begin;
    return typeName;
}
}
/*!
* @defgroup AssertMacros Assertion and Check Macros
* @brief The assertion and check macros provided by SeqAn.
*
* Assertions are checks performed at runtime when debugging is enabled. Debugging is enabled by defining the
* preprocessor symbol <tt>SEQAN_ENABLE_DEBUG</tt> as <tt>1</tt> (the default is to set it to <tt>0</tt> if the common C
* macro <tt>NDEBUG</tt> is defined and to set it to <tt>1</tt> otherwise. When using the SeqAn build system or the
* CMake FindSeqAn.cmake module, this is automatically set appropriately.
*
* The SEQAN_CHECK and SEQAN_FAIL macro always lead to an exit of the program with a non-0 return value.
*/
/*!
* @macro AssertMacros#SEQAN_FAIL
* @headerfile <seqan/basic.h>
* @brief Force abortion of program, regardless of debugging settings.
*
* @signature SEQAN_FAIL(msg[, args]);
*
* @param[in] msg A format string.
* @param[in] args An optional list of arguments that are used for filling msg.
*
* @section Remarks
*
* Use this if something really unexpected happens inside your functions and there is no way to report this through the
* API. A good example would be logic errors, e.g. invalid values.
*
* @section Examples
*
* In the following example, the <tt>SEQAN_FAIL</tt> is there if a possible value is added to <tt>MyEnum</tt> but the
* function <tt>foo</tt> is not updated accordingly.
*
* @code{.cpp}
* enum MyEnum
* {
* VALUE_ONE,
* VALUE_TWO
* };
*
* bool foo(MyEnum x)
* {
* switch (x)
* {
* case VALUE_ONE:
* // do something
* return true;
* case VALUE_TWO:
* // do something
* return true;
* }
*
* SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x);
* return false;
* }
* @endcode
*/
#define SEQAN_FAIL(...) \
do { \
::seqan::ClassTest::forceFail(__FILE__, __LINE__, \
__VA_ARGS__); \
::seqan::ClassTest::fail(); \
} while (false)
/*!
* @macro AssertMacros#SEQAN_CHECK
* @headerfile <seqan/basic.h>
* @brief Force abortion of program if a condition is not met, regardless of debugging settings.
*
* @signature SEQAN_CHECK(condition, msg[, args]);
*
* @param[in] condition An expression that is checked.
* @param[in] msg A format string.
* @param[in] args An optional list of arguments.
*
* @section Remarks
*
* Use this if something really unexpected happens inside your functions and there is no way to report this through the
* API. A good example would be logic errors, e.g. invalid values.
*
* @section Examples
*
* In the following example, the <tt>SEQAN_CHECK</tt> stops program execution if a value is added to <tt>MyEnum</tt> but
* the function <tt>foo</tt> is not updated accordingly.
*
* @code{.cpp}
* enum MyEnum
* {
* VALUE_ONE,
* VALUE_TWO
* };
*
* bool foo(MyEnum x)
* {
* SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x);
*
* switch (x)
* {
* case VALUE_ONE:
* // do something
* return true;
* case VALUE_TWO:
* // do something
* return true;
* }
*
* return false; // Should never reach here, checked above with SEQAN_CHECK.
* }
* @endcode
*/
#define SEQAN_CHECK(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// SeqAn's has three global debug/testing levels: testing, debug and
// release. Depending on the level, the SEQAN_ASSERT_* and
// SEQAN_CHECKPOINT macros will be enabled.
//
// Note that this is independent of the <cassert> assertions and
// NDEBUG being defined.
//
// The levels are enabled by the values of the macros
// SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to
// 0, one disables the level and by setting the macro to 1, one
// enables a level. Enabling testing also enables debug, overriding a
// value of 0 for SEQAN_ENABLE_DEBUG.
//
// If the level is release (both the macros for debug and testing are
// 0), the assertions will be disabled. If the level is debug then
// the assertions will be enabled. If the level is testing then the
// checkpoint macros will also be enabled.
//
// The default is to enable debugging but disable testing.
//
// You can print the current level using the function seqan::printDebugLevel().
/*!
* @macro TestSystemMacros#SEQAN_ENABLE_TESTING
* @headerfile <seqan/basic.h>
* @brief Indicates whether testing is enabled.
*
* @signature SEQAN_ENABLE_TESTING
*
* When set to 1, testing is enabled. If it is undefined or set to 0, testing is disabled. This means the macros for
* the tests (SEQAN_BEGIN_TESTSUITE, SEQAN_DEFINE_TEST, SEQAN_CALL_TEST, and SEQAN_END_TESTSUITE) will be enabled. This
* makes failing assertions raise exceptions instead of calling <tt>abort()</tt> (which terminates the program).
*
* By default, this is set to 0.
*
* If you want to change this value in your C++ program code you have to define this value before including any SeqAn header!
*
* If set to 1 then @link TestSystemMacros#SEQAN_ENABLE_DEBUG @endlink is forced to 1 as well.
*
* @see TestSystemMacros#SEQAN_ENABLE_DEBUG
*/
// Set default for SEQAN_ENABLE_TESTING.
#ifndef SEQAN_ENABLE_TESTING
#define SEQAN_ENABLE_TESTING 0
#endif // #ifndef SEQAN_ENABLE_TESTING
/*!
* @macro TestSystemMacros#SEQAN_ENABLE_DEBUG
* @headerfile <seqan/basic.h>
* @brief Indicates whether debugging is enabled.
*
* @signature SEQAN_ENABLE_DEBUG
*
* When enabled (set to 1) then debugging is enabled. This means the assertion macros are expanded to actual test code.
* If debugging (and testing) is disabled then the SeqAn assertion macros expand to no instructions.
*
 * By default, this is set to 0 if <tt>NDEBUG</tt> is defined and set to 1 if <tt>NDEBUG</tt> is not defined.
*
* If you want to change this value then you have to define this value before including any SeqAn header.
*
* Force-enabled if SEQAN_ENABLE_TESTING is set to 1.
*
* @see TestSystemMacros#SEQAN_ENABLE_TESTING
*/
// Set default for SEQAN_ENABLE_DEBUG.
#ifndef SEQAN_ENABLE_DEBUG
#ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 0
#else // #ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif // #ifdef NDEBUG
#endif // #ifndef SEQAN_ENABLE_DEBUG
// Force-enable debugging if testing is enabled.
#if SEQAN_ENABLE_TESTING
#undef SEQAN_ENABLE_DEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif // #if SEQAN_ENABLE_TESTING
// Allow disabling checkpoints independent of testing.
#ifndef SEQAN_ENABLE_CHECKPOINTS
#define SEQAN_ENABLE_CHECKPOINTS 0 // SEQAN_ENABLE_TESTING
#endif // #ifndef SEQAN_ENABLE_CHECKPOINTS
/*!
* @macro TestSystemMacros#SEQAN_TYPEDEF_FOR_DEBUG
* @headerfile <seqan/basic.h>
* @brief When using typedefs that are only used in debug mode then they have to be marked with macro.
*
 * @signature SEQAN_TYPEDEF_FOR_DEBUG
*
* @section Examples
*
* @code{.cpp}
* typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG;
* @endcode
*/
#if !SEQAN_ENABLE_DEBUG
#define SEQAN_TYPEDEF_FOR_DEBUG SEQAN_UNUSED
#else
#define SEQAN_TYPEDEF_FOR_DEBUG
#endif
namespace seqan {
// SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string
// literal with this value.
#if !defined(SEQAN_CXX_FLAGS_)
#define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET
#endif // !defined(SEQAN_CXX_FLAGS__)
#define SEQAN_MKSTRING_(str) # str
#define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str)
#define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_)
//#undef SEQAN_MKSTRING
//#undef SEQAN_MKSTRING_
/*!
* @fn printDebugLevel
* @headerfile <seqan/basic.h>
* @brief Print the current SeqAn debug level and the compiler flags to the given stream.
*
* @signature void printDebugLevel(stream);
*
* @param[in,out] stream A std::ostream where the information about the levels are streamed to.
*/
// Print the current SeqAn debug/testing configuration and the compiler
// flags to the given stream, one "NAME == value" line per setting.
template <typename TStream>
void printDebugLevel(TStream & stream)
{
    stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl;
    stream << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl;
    stream << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl;
    stream << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl;
}
#if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
// No-op fallback: stack traces are unavailable on Windows or when
// execinfo.h is not present (SEQAN_HAS_EXECINFO == 0).
template <typename TSize>
void printStackTrace(TSize /*maxFrames*/)
{}
#else
// print a demangled stack backtrace of the caller function
// TODO(esiragusa): use Demangler.
// Print a demangled stack backtrace of the caller to std::cerr.
//
// Frame 0 (this function itself) is skipped.  Each line returned by
// backtrace_symbols() is parsed with sscanf() against the known Linux and
// Mac OS X formats to extract address, symbol name, and offset; the
// symbol is demangled via abi::__cxa_demangle() when possible.
//
// NOTE(review): addrlist holds 256 entries but backtrace() is called with
// maxFrames unchecked -- confirm callers never pass more than 256.  The
// unbounded %s / %[^)] conversions below write into addr[20]/offset[20];
// confirm the fields backtrace_symbols() emits cannot exceed that.
template <typename TSize>
void printStackTrace(TSize maxFrames)
{
    void * addrlist[256];
    char temp[4096];
    char addr[20];
    char offset[20];

    size_t size;
    int status;
    char * symname;
    char * demangled;

    std::cerr << std::endl << "stack trace:" << std::endl;

    int addrlist_len = backtrace(addrlist, maxFrames);
    char ** symbollist = backtrace_symbols(addrlist, addrlist_len);
    for (int i = 1; i < addrlist_len; ++i)  // start at 1: skip this frame
    {
        offset[0] = 0;
        addr[0] = 0;
        demangled = NULL;

        // LINUX FORMAT:
        //     ./sam2svg [0x473b8c]
        //     /lib/libc.so.6 [0x7f40d2526f60]
        //     ./sam2svg(_Z2f3v+0x10) [0x47200c]
        //     ./sam2svg(_Z2f2v+0xd) [0x472021]
        //     ./sam2svg(main+0x1367) [0x4735fc]
        //     /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6]
        //
        if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr))
        {
            symname = temp;
            if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
            {
                symname = demangled;
            }
        }
        // MAC OS X FORMAT:
        //     1   sam2svg   0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21
        //     2   libSystem.B.dylib   0x00007fff87a6d67a _sigtramp + 26
        //     3   libSystem.B.dylib   0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980
        //     4   sam2svg   0x00000001000021b9 _Z2f2v + 9
        //     5   sam2svg   0x00000001000034b1 main + 4546
        //     6   sam2svg   0x0000000100002190 start + 52
        else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset))
        {
            symname = temp;
            if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
            {
                symname = demangled;
            }
        }
        // LINUX FORMAT:
        //     ./sam2svg [0x473b8c]
        //     /lib/libc.so.6 [0x7f40d2526f60]
        else if (2 == sscanf(symbollist[i], "%s %s", temp, addr))
        {
            symname = temp;
        }
        // DEFAULT: print the raw line when no format matched.
        else
        {
            symname = symbollist[i];
        }

        std::cerr << std::setw(3) << i - 1;
        std::cerr << std::setw(20) << addr;
        std::cerr << " " << symname;
        if (offset[0] != 0)
            std::cerr << " + " << offset;
        std::cerr << std::endl;
        free(demangled);  // free(NULL) is a no-op
    }
    std::cerr << std::endl;
    // Only the array must be freed according to man page, not the contents.
    free(symbollist);
}
// Fatal-signal handler: print a stack trace, then restore the default
// handler and re-raise the signal so the process still terminates with
// the original signal's semantics (exit status, core dump).
static void signalHandlerPrintStackTrace(int signum)
{
    std::cerr << std::endl;
    printStackTrace(20);
    signal(signum, SIG_DFL);
    kill(getpid(), signum);
}
// Install the stack-trace-printing handler for fatal signals.  Returns 0
// so the call can be used as a static initializer (see the
// SignalHandlersDummy_ machinery below).
inline int _deploySignalHandlers()
{
    signal(SIGSEGV, signalHandlerPrintStackTrace); // segfault
    signal(SIGFPE, signalHandlerPrintStackTrace); // divide by zero
    // ...
    return 0;
}
#if SEQAN_ENABLE_DEBUG
// Automatically deploy the signal handlers in debug mode: initializing
// the static member i of the class template calls
// _deploySignalHandlers() exactly once, and the volatile dummy variable
// in the anonymous namespace below forces that instantiation to survive.
template <typename T>
struct SignalHandlersDummy_
{
    static const int i;
};

template <typename T>
const int SignalHandlersDummy_<T>::i = _deploySignalHandlers();

namespace {

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-variable"
#endif  // ifdef __clang__
// volatile: keep the reference to SignalHandlersDummy_<void>::i alive.
volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i;
#ifdef __clang__
#pragma clang diagnostic pop
#endif  // ifdef __clang__

}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
// Namespace for the testing infrastructure.
//
// This namespace contains the variables and functions that are used
// in the macros below to perform the tests.
namespace ClassTest {
// Raised when an assertion fails in test mode.
struct AssertionFailedException {};
// Container for static global data for the tests.
//
// Every accessor returns a reference to a function-local static, which
// keeps the header self-contained (no .cpp definitions needed) and
// avoids global construction-order issues.
struct StaticData
{
    // Number of tests that were run.
    static int & testCount()
    {
        static int result = 0;
        return result;
    }

    // Number of errors that occurred.
    static int & errorCount()
    {
        static int result = 0;
        return result;
    }

    // Number of skipped tests.
    static int & skippedCount()
    {
        static int result = 0;
        return result;
    }

    // Flag whether there was an error in this test.
    static bool & thisTestOk()
    {
        static bool result = 0;
        return result;
    }

    // Flag whether this test was skipped.
    static bool & thisTestSkipped()
    {
        static bool result = 0;
        return result;
    }

    // Name of the current test.
    static const char * & currentTestName()
    {
        static const char * result = "";
        return result;
    }

    // Base path to the binary.  Extrapolated from __FILE__.
    static char * & basePath()
    {
        const char * defaultValue = ".";
        static char * result = const_cast<char *>(defaultValue);
        return result;
    }

    // Derive the repository root from this header's __FILE__ by locating
    // the last occurrence of "include" and cutting the path in front of
    // it (without the trailing separator).  Exits the program when the
    // path cannot be extrapolated.
    static char const * _computePathToRoot()
    {
        const char * file = __FILE__;
        size_t fileLen = strlen(file);
        size_t infixLen = strlen("include");
        int pos = -1;
        // Guard: fileLen - infixLen is size_t arithmetic and would wrap
        // for paths shorter than "include".
        if (fileLen >= infixLen)
        {
            for (size_t i = 0; i < fileLen - infixLen; ++i)
            {
                if (strncmp(file + i, "include", infixLen) == 0)
                    pos = i;
            }
        }
        // Walk back to the directory separator preceding "include".
        for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos)
            continue;
        static char buffer[1024];
        // pos <= 0 covers both "not found" (-1) and "no separator before
        // include" (0); the latter previously wrote to buffer[-1].  The
        // sizeof check prevents strncpy() from overflowing the buffer.
        if ((pos <= 0) || (pos >= (int) sizeof(buffer)))
        {
            std::cerr << "Could not extrapolate path to repository from __FILE__ == \""
                      << __FILE__ << "\"" << std::endl;
            exit(1);
        }
        strncpy(&buffer[0], file, pos);
        buffer[pos - 1] = '\0';  // also strips the trailing separator
        return &buffer[0];
    }

    // Base path to the directory containing "core" and "extras."
    // Extrapolated from __FILE__ and computed only once.
    static char const * pathToRoot()
    {
        // 'static' is essential for the memoization: the previous plain
        // local was reset to 0 and recomputed on every call.
        static const char * result = 0;
        if (!result)
            result = _computePathToRoot();
        return result;
    }

    // Total number of checkpoints in header file.
    static int & totalCheckPointCount()
    {
        static int result = 0;
        return result;
    }

    // Total number of checkpoints found in binary files.
    static int & foundCheckPointCount()
    {
        static int result = 0;
        return result;
    }

    // Names of temporary files as returned by tempFileName.  This global
    // state is used to remove any existing such files after completing
    // the testsuite.
    static std::vector<std::string> & tempFileNames()
    {
        static std::vector<std::string> filenames;
        return filenames;
    }
};
// Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet.
// TODO(holtgrew): Not used yet and Windows code does not work.
/*
inline
int openTempFile() {
#ifdef PLATFORM_WINDOWS
char * fileName = _tempnam(NULL, "SQN");
if (!fileName) {
::std::cerr << "Cannot create a unique temporary filename" << ::std::endl;
exit(1);
}
int result = open(fileName, _O_RDWR | OPEN_TEMPORARY);
free(fileName);
return result;
#else // A Unix...
char filenameBuffer[100];
strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX");
int result = mkstemp(filenameBuffer);
unlink(filenameBuffer);
return result;
#endif // ifdef PLATFORM_WINDOWS
}
*/
// Return the path to a temporary file, in a static buffer in this
// function. This is not thread safe!
// Return the path to a fresh temporary test file: "<unique-dir>/test_file"
// inside a newly created unique directory.  The directory name is pushed
// onto StaticData::tempFileNames() so the test suite can remove it later.
// Uses a static buffer: NOT thread safe, and every call invalidates the
// previously returned pointer.
inline
const char * tempFileName()
{
    static char fileNameBuffer[1000];
#ifdef PLATFORM_WINDOWS
    static char filePathBuffer[1000];
    //  Gets the temp path env string (no guarantee it's a valid path).
    DWORD dwRetVal = 0;
    dwRetVal = GetTempPath(1000,            // length of the buffer
                           filePathBuffer); // buffer for path
    if (dwRetVal > 1000 || (dwRetVal == 0))
    {
        std::cerr << "GetTempPath failed" << std::endl;
        exit(1);
    }

    UINT uRetVal = 0;
    uRetVal = GetTempFileName(filePathBuffer,  // directory for tmp files
                              TEXT("SEQAN."),  // temp file name prefix
                              0,               // create unique name
                              fileNameBuffer); // buffer for name

    if (uRetVal == 0)
    {
        std::cerr << "GetTempFileName failed" << std::endl;
        exit(1);
    }

    // Replace the created file by a directory of the same unique name and
    // hand out a file path inside it.
    DeleteFile(fileNameBuffer);
    CreateDirectoryA(fileNameBuffer, NULL);
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "\\test_file");
    return fileNameBuffer;
#else  // ifdef PLATFORM_WINDOWS_VS
    strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX");
    mode_t cur_umask = umask(S_IRWXO | S_IRWXG); // to silence Coverity warning
    int _tmp = mkstemp(fileNameBuffer);
    (void) _tmp;
    umask(cur_umask);
    // Replace the mkstemp()-created file by a directory of the same
    // unique name and hand out a file path inside it.
    unlink(fileNameBuffer);
    mkdir(fileNameBuffer, 0777);
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "/test_file");
    return fileNameBuffer;
#endif  // ifdef PLATFORM_WINDOWS
}
// Initialize the testing infrastructure.
//
// Used through SEQAN_BEGIN_TESTSUITE(test_name)
// Initialize the testing infrastructure.
//
// Used through SEQAN_BEGIN_TESTSUITE(test_name).  Prints the suite banner
// and debug level, resets all counters, and extrapolates the directory
// containing the test binary from argv0 into StaticData::basePath().
//
// Fixes over the previous version:
//  * The separator search used std::min(strchr(argv0, '\\'),
//    strchr(argv0, '/')), which yields NULL whenever only one separator
//    kind occurs (always, on Unix paths) -- basePath() then silently fell
//    back to ".".  A direct scan for the last '\\' or '/' is correct on
//    both platforms.
//  * basePath() was allocated as new char[len] and filled with
//    strncpy(..., len), leaving it without a NUL terminator.
inline
void beginTestSuite(const char * testSuiteName, const char * argv0)
{
    // First things first: Print test suite name and current debug level.
    std::cout << "TEST SUITE " << testSuiteName << std::endl;
    printDebugLevel(std::cout);
    StaticData::testCount() = 0;
    StaticData::skippedCount() = 0;
    StaticData::errorCount() = 0;
    StaticData::totalCheckPointCount() = 0;
    StaticData::foundCheckPointCount() = 0;
    // Find the last path separator in argv0.  On Windows, we can have
    // both \ and /.
    const char * end = argv0;
    for (const char * ptr = argv0; *ptr != '\0'; ++ptr)
    {
        if (*ptr == '\\' || *ptr == '/')
            end = ptr;
    }
    int rpos = end - argv0;
    if (rpos <= 0)
    {
        // No directory part (or separator at position 0): use ".".
        StaticData::basePath() = new char[2];
        strcpy(StaticData::basePath(), ".");
    }
    else
    {
        int len = rpos;
        StaticData::basePath() = new char[len + 1];  // +1 for the NUL terminator
        strncpy(StaticData::basePath(), argv0, len);
        StaticData::basePath()[len] = '\0';
    }
#ifdef PLATFORM_WINDOWS_VS
    // Set CRT reporting such that everything goes to stderr and there are
    // no popups causing timeouts.
    _set_error_mode(_OUT_TO_STDERR);
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
#endif  // PLATFORM_WINDOWS_VS
}
// Run test suite finalization.
//
// Used through SEQAN_END_TESTSUITE
//
// Prints a bottom banner with the error count, removes leftover temporary
// directories and returns the program's return code (1 iff errors occurred).
inline
int endTestSuite()
{
    delete[] StaticData::basePath();
    std::cout << "**************************************" << std::endl;
    std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl;
    std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl;
    std::cout << " Lost Check Points  : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl;
    std::cout << "--------------------------------------" << std::endl;
    std::cout << " Total Tests: " << StaticData::testCount() << std::endl;
    std::cout << " Skipped:     " << StaticData::skippedCount() << std::endl;
    std::cout << " Errors:      " << StaticData::errorCount() << std::endl;
    std::cout << "**************************************" << std::endl;
    // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1;
    /*
    if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount())
        return 1;
    */
    // Delete all temporary files that still exist.
    for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i)
    {
#ifdef PLATFORM_WINDOWS
        HANDLE hFind;
        WIN32_FIND_DATA data;
        std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*");
        hFind = FindFirstFile(temp.c_str(), &data);
        if (hFind != INVALID_HANDLE_VALUE)
        {
            do
            {
                std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName;
                if (strcmp(data.cFileName, ".") == 0 || strcmp(data.cFileName, "..") == 0)
                    continue;  // Skip these.
                if (!DeleteFile(tempp.c_str()))
                    std::cerr << "WARNING: Could not delete file " << tempp << "\n";
            }
            while (FindNextFile(hFind, &data));
            FindClose(hFind);
        }
        if (!RemoveDirectory(StaticData::tempFileNames()[i].c_str()))
            std::cerr << "WARNING: Could not delete directory " << StaticData::tempFileNames()[i] << "\n";
#else  // #ifdef PLATFORM_WINDOWS
        DIR * dpdf = opendir(StaticData::tempFileNames()[i].c_str());
        if (dpdf != NULL)
        {
            // Unlink every entry in the temporary directory.
            struct dirent * epdf;
            while ((epdf = readdir(dpdf)) != NULL)
            {
                std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name);
                unlink(temp.c_str());
            }
            // Close only if the directory stream was actually opened.  The
            // previous code called closedir() unconditionally, which is
            // undefined behavior when opendir() failed and dpdf is NULL.
            closedir(dpdf);
        }
        // Warn when the directory itself cannot be removed.
        if (rmdir(StaticData::tempFileNames()[i].c_str()) != 0)
            std::cerr << "WARNING: Could not delete directory " << StaticData::tempFileNames()[i] << "\n";
#endif  // #ifdef PLATFORM_WINDOWS
    }
    if (StaticData::errorCount() != 0)
        return 1;
    return 0;
}
// Run test initialization.
//
// Registers one more test and resets the per-test state flags.
inline
void beginTest(const char * testName)
{
    StaticData::testCount() += 1;
    StaticData::thisTestOk() = true;
    StaticData::thisTestSkipped() = false;
    StaticData::currentTestName() = testName;
}
// Run test finalization.
//
// Prints the verdict for the just-finished test: SKIPPED and OK go to
// stdout, FAILED goes to stderr.
inline
void endTest()
{
    bool skipped = StaticData::thisTestSkipped();
    bool ok = StaticData::thisTestOk();
    const char * verdict = skipped ? " SKIPPED" : (ok ? " OK" : " FAILED");
    std::ostream & out = (skipped || ok) ? std::cout : std::cerr;
    out << StaticData::currentTestName() << verdict << std::endl;
}
// Marks the current test as "skipped".
//
// Counts the skip and flags the running test so endTest() reports SKIPPED.
inline
void skipCurrentTest()
{
    StaticData::skippedCount() += 1;
    StaticData::thisTestSkipped() = true;
}
// Called by the macro SEQAN_ASSERT_FAIL.
//
// Records an unconditional failure and prints "<file>:<line> FAILED!",
// optionally followed by a printf-style comment in parentheses.
inline void forceFail(const char * file, int line,
                      const char * comment, ...)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment != 0)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
}
// Similar to forceFail above, but accepting a va_list parameter.
inline void vforceFail(const char * file, int line,
                       const char * comment, va_list argp)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment != 0)
    {
        // Render the caller-provided printf-style comment in parentheses.
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}
// Same as forceFail above, but with comment set to 0.
inline void forceFail(const char * file, int line)
{
    const char * noComment = 0;
    forceFail(file, line, noComment);
}
// Called by the macro SEQAN_ASSERT_EQ.
//
// Returns true iff value1 == value2.  On failure, marks the current test
// as failed and prints a diagnostic, optionally with a printf-style comment.
template <typename T1, typename T2>
bool testEqual(char const * file, int line,
               T1 const & value1, char const * expression1,
               T2 const & value2, char const * expression2,
               char const * comment, ...)
{
    if (value1 == value2)
        return true;  // Assertion holds, nothing to report.

    // Record the failure in the global bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit location, expressions and the offending values.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " == " << expression2 << " was: " << value1
              << " != " << value2;
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestEqual(const char * file, int line,
                const T1 & value1, const char * expression1,
                const T2 & value2, const char * expression2,
                const char * comment, va_list argp)
{
    if (value1 == value2)
        return true;  // Assertion holds.

    // Record the failure and emit the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " == " << expression2 << " was: " << value1
              << " != " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testEqual(const char * file, int line,
               const T1 & value1, const char * expression1,
               const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testEqual(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_IN_DELTA.
//
// Returns true iff value1 lies within [value2 - value3, value2 + value3].
// On failure, marks the test as failed and prints a diagnostic.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3,
                 const char * comment, ...)
{
    if (value1 >= value2 - value3 && value1 <= value2 + value3)
        return true;  // Within tolerance.

    // Record the failure and emit the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " in [" << expression2 << " - " << expression3
              << ", " << expression2 << " + " << expression3 << "] was: " << value1
              << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testInDelta above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2, typename T3>
bool vtestInDelta(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const T3 & value3, const char * expression3,
                  const char * comment, va_list argp)
{
    if (value1 >= value2 - value3 && value1 <= value2 + value3)
        return true;  // Within tolerance.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " in [" << expression2 << " - " << expression3
              << ", " << expression2 << " + " << expression3 << "] was: " << value1
              << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testInDelta above, but with comment set to 0.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3)
{
    const char * noComment = 0;
    return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, noComment);
}
// Called by the macro SEQAN_ASSERT_NEQ.
//
// Returns true iff value1 != value2.  On failure, marks the current test
// as failed and prints a diagnostic, optionally with a printf-style comment.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const char * comment, ...)
{
    if (value1 != value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " != " << expression2 << " was: " << value1
              << " == " << value2;
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testNotEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestNotEqual(const char * file, int line,
                   const T1 & value1, const char * expression1,
                   const T2 & value2, const char * expression2,
                   const char * comment, va_list argp)
{
    if (value1 != value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " != " << expression2 << " was: " << value1
              << " == " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testNotEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testNotEqual(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_GEQ.
//
// Returns true iff value1 >= value2.  On failure, marks the current test
// as failed and prints a diagnostic, optionally with a printf-style comment.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (value1 >= value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " >= " << expression2 << " was: " << value1
              << " < " << value2;
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testGeq above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestGeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (value1 >= value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " >= " << expression2 << " was: " << value1
              << " < " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testGeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testGeq(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_GT.
//
// Returns true iff value1 > value2.  On failure, marks the current test
// as failed and prints a diagnostic, optionally with a printf-style comment.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (value1 > value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " > " << expression2 << " was: " << value1
              << " <= " << value2;
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testGt above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestGt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (value1 > value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " > " << expression2 << " was: " << value1
              << " <= " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testGt above, but with comment set to 0.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testGt(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_LEQ.
//
// Returns true iff value1 <= value2.  On failure, marks the current test
// as failed and prints a diagnostic, optionally with a printf-style comment.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (value1 <= value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " <= " << expression2 << " was: " << value1
              << " > " << value2;
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testLeq above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (value1 <= value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " <= " << expression2 << " was: " << value1
              << " > " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testLeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testLeq(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_LT.
//
// Returns true iff value1 < value2 (the previous comment wrongly said
// "greater than").  On failure, marks the current test as failed and
// prints a diagnostic, optionally with a printf-style comment.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (value1 < value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " < " << expression2 << " was: " << value1
              << " >= " << value2;
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testLt above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (value1 < value2)
        return true;  // Assertion holds.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " < " << expression2 << " was: " << value1
              << " >= " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testLt above, but comment is 0.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testLt(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT.
//
// Returns true iff value_ evaluates to true.  On failure, marks the
// current test as failed and prints a diagnostic.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_,
              const char * comment, ...)
{
    if (value_)
        return true;  // Expression is truthy.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be true but was " << (value_);
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testTrue above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestTrue(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, va_list argp)
{
    if (value_)
        return true;  // Expression is truthy.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be true but was " << (value_);
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testTrue above, but comment will automatically be set to 0.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_)
{
    const char * noComment = 0;
    return testTrue(file, line, value_, expression_, noComment);
}
// Called by the macro SEQAN_ASSERT_NOT.
//
// Returns true iff value_ evaluates to false.  On failure, marks the
// current test as failed and prints a diagnostic.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, ...)
{
    if (!value_)
        return true;  // Expression is falsy.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be false but was " << (value_);
    if (comment)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testFalse above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestFalse(const char * file, int line,
                const T & value_, const char * expression_,
                const char * comment, va_list argp)
{
    if (!value_)
        return true;  // Expression is falsy.

    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be false but was " << (value_);
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testFalse above, but comment will automatically be set to 0.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_)
{
    const char * noComment = 0;
    return testFalse(file, line, value_, expression_, noComment);
}
// Represents a check point in a file.
struct CheckPoint
{
    // Path to the file.
    const char * file;
    // Line in the file.
    unsigned int line;

    // Strict weak ordering: lexicographically by file name, then by line.
    bool operator<(const CheckPoint & other) const
    {
        int c = strcmp(file, other.file);
        return c < 0 || (c == 0 && line < other.line);
    }
};
// Wrapper for the process-wide set of registered check points.
// TODO(holtgrew): Simply store the set?
struct CheckPointStore
{
    // Singleton accessor: the set is constructed on first use.
    static ::std::set<CheckPoint> & data()
    {
        static ::std::set<CheckPoint> result;
        return result;
    }
};
// Puts the given check point into the CheckPointStore's data.
//
// Strips the directory part of the file name (both '/' and '\' are
// accepted as separators) before inserting.
inline bool
registerCheckPoint(unsigned int line, const char * file)
{
    const char * file_name = strrchr(file, '/');
    const char * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;
    CheckPoint cp = {file_name, line};
    // NOTE(review): the standard OpenMP feature-test macro is _OPENMP; the
    // previous "_OMP" guard was never defined by OpenMP compilers, so the
    // critical section was compiled out and the shared set was mutated
    // without protection under OpenMP.  Confirm no build defines _OMP.
#ifdef _OPENMP
#pragma omp critical
#endif  // #ifdef _OPENMP
    CheckPointStore::data().insert(cp);
    return true;
}
// Test whether the given check point exists in the check point
// store.
inline void
testCheckPoint(const char * file, unsigned int line)
{
StaticData::totalCheckPointCount() += 1;
CheckPoint cp = {file, line};
if (CheckPointStore::data().find(cp) == CheckPointStore::data().end())
{
std::cerr << file << ":" << line << " -- Check point lost."
<< std::endl;
return;
}
StaticData::foundCheckPointCount() += 1;
}
// Verify the check points for the given file.
//
// Reads "<pathToRoot>/<file>" line by line and tests a check point for
// every line containing the token SEQAN_CHECKPOINT.
inline void
verifyCheckPoints(const char * file)
{
    // Strip the directory part; both separator kinds can occur.
    char const * file_name = strrchr(file, '/');
    char const * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;
    // Build the absolute path "<pathToRoot>/<file>".
    int len = strlen(StaticData::pathToRoot()) +
              strlen("/") + strlen(file) + 1;
    char * absolutePath = new char[len];
    absolutePath[0] = '\0';
    strcat(absolutePath, StaticData::pathToRoot());
    strcat(absolutePath, "/");
    strcat(absolutePath, file);
    FILE * fl = ::std::fopen(absolutePath, "r");
    delete[] absolutePath;
    if (!fl)
    {
        std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl;
        // Must bail out here: the previous code fell through and passed
        // the NULL handle to fgets(), which is undefined behavior.
        return;
    }
    unsigned int line_number = 1;
    char buf[1 << 16];
    while (::std::fgets(buf, sizeof(buf), fl))
    {
        if (::std::strstr(buf, "SEQAN_CHECKPOINT"))
        {
            testCheckPoint(file_name, line_number);
        }
        ++line_number;
    }
    ::std::fclose(fl);
}
#if SEQAN_ENABLE_TESTING
// If in testing mode then raise an AssertionFailedException.
// Marks the running test as failed and dumps a stack trace first; the
// exception lets SEQAN_CALL_TEST continue with the next test.
inline void fail()
{
    StaticData::thisTestOk() = false;
    printStackTrace(20);
    throw AssertionFailedException();
}
#else
// If not in testing mode then quit with an abort.
// Dumps a stack trace first so the failure location is visible.
inline void fail()
{
    printStackTrace(20);
    abort();
}
#endif  // #if SEQAN_ENABLE_TESTING
} // namespace ClassTest
/*!
* @macro TestSystemMacros#SEQAN_DEFINE_TEST
* @headerfile <seqan/basic.h>
* @brief Expand to test definition.
*
* @signature SEQAN_DEFINE_TEST(test_name)
*
 * This macro expands to the definition of a <tt>void</tt> function with <tt>SEQAN_TEST_ + test_name</tt> as its name.
*
* @section Example
*
* @code{.cpp}
* SEQAN_DEFINE_TEST(test_name)
* {
* SEQAN_ASSERT_LT(0, 3);
* }
* @endcode
*/
// This macro expands to function header for one test.
// The function is a template on a dummy bool so that tests which are never
// referenced by SEQAN_CALL_TEST are not instantiated (saves compile time);
// SEQAN_CALL_TEST instantiates it with <true>.
#define SEQAN_DEFINE_TEST(test_name) \
template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \
void SEQAN_TEST_ ## test_name()
/*!
* @defgroup TestSystemMacros Test System Macros
* @brief Macros for the test system.
*/
/*!
* @macro TestSystemMacros#SEQAN_BEGIN_TESTSUITE
* @headerfile <seqan/basic.h>
* @brief Expand to a test suite beginning.
*
* @signature SEQAN_BEGIN_TESTSUITE(name)
*
* @param[in] name The name of the test suite.
*
* This macro expands to a <tt>main()</tt> function and some initialization code that sets up the test system.
*
* @section Examples
*
* @code{.cpp}
* #include <seqan/basic.h>
*
* SEQAN_BEGIN_TESTSUITE(test_foo)
* {
* SEQAN_CALL_TEST(test_foo_my_test);
* }
* SEQAN_END_TESTSUITE
* @endcode
*/
#if SEQAN_ENABLE_TESTING
// This macro expands to startup code for a test file.
// It opens main(), silences the unused-argc warning and initializes the
// suite bookkeeping with the suite name and the program path (argv[0]);
// the matching SEQAN_END_TESTSUITE closes main() again.
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
int main(int argc, char ** argv) { \
(void) argc; \
::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]);
/*!
* @macro TestSystemMacros#SEQAN_END_TESTSUITE
* @headerfile <seqan/basic.h>
* @brief Expand to test suite ending.
*
* @signature SEQAN_END_TESTSUITE
*
* This macro expands to finalization code for a test suite.
*
* @section Examples
*
* @code{.cpp}
* #include <seqan/basic.h>
*
* SEQAN_BEGIN_TESTSUITE(test_foo)
* {
* SEQAN_CALL_TEST(test_foo_my_test);
* }
* SEQAN_END_TESTSUITE
* @endcode
*/
// This macro expands to shutdown code for a test file.
// Returns endTestSuite()'s result (non-zero iff errors occurred) and closes
// the main() opened by SEQAN_BEGIN_TESTSUITE.
#define SEQAN_END_TESTSUITE \
return ::seqan::ClassTest::endTestSuite(); \
}
/*!
* @macro TestSystemMacros#SEQAN_CALL_TEST
* @headerfile <seqan/basic.h>
* @brief Expand to calling a test.
*
* @signature SEQAN_CALL_TEST(test_name);
*
* This expects the test to be defined with SEQAN_DEFINE_TEST. This macro will expand to code that calls the code
* inside a try/catch block. Use this macro within a test suite, only.
*
* @section Examples
*
* @code{.cpp}
* // Within a test suite.
* SEQAN_CALL_TEST(test_name);
* @endcode
*/
// This macro expands to code to call a given test.
// Wraps the call in begin/endTest() bookkeeping and a try/catch so that an
// assertion failure (AssertionFailedException) or any unexpected exception
// aborts only this test, not the whole suite.
#define SEQAN_CALL_TEST(test_name) \
do { \
seqan::ClassTest::beginTest(# test_name); \
try { \
SEQAN_TEST_ ## test_name<true>(); \
} catch (seqan::ClassTest::AssertionFailedException e) { \
/* Swallow exception, go on with next test. */ \
(void) e; /* Get rid of unused variable warning. */ \
} catch (std::exception const & e) { \
std::cerr << "Unexpected exception of type " \
<< toCString(seqan::Demangler<std::exception>(e)) \
<< "; message: " << e.what() << "\n"; \
seqan::ClassTest::StaticData::thisTestOk() = false; \
seqan::ClassTest::StaticData::errorCount() += 1; \
} catch (...) { \
std::cerr << "Unexpected exception of unknown type\n"; \
seqan::ClassTest::StaticData::thisTestOk() = false; \
seqan::ClassTest::StaticData::errorCount() += 1; \
} \
seqan::ClassTest::endTest(); \
} while (false)
/*!
* @macro TestSystemMacros#SEQAN_SKIP_TEST
* @headerfile <seqan/basic.h>
* @brief Force the test to return without failing and mark it as skipped.
*
* @signature SEQAN_SKIP_TEST;
*
* @section Examples
*
* @code{.cpp}
* SEQAN_DEFINE_TEST(test_skipped)
* {
* SEQAN_SKIP_TEST;
* }
* @endcode
*/
// This macro returns from the current function and logs a "skipped"
// event for the current test.
// Only meaningful inside a SEQAN_DEFINE_TEST body: the early return ends
// the test and endTest() will then report it as SKIPPED.
#define SEQAN_SKIP_TEST \
do { \
::seqan::ClassTest::skipCurrentTest(); \
return; \
} while (false)
#endif  // #if SEQAN_ENABLE_TESTING
// variadic macros are not supported by VS 2003 and before
#if !defined(_MSC_VER) || (_MSC_VER >= 1400)
#if SEQAN_ENABLE_DEBUG && !defined(__CUDA_ARCH__)
/*!
* @macro AssertMacros#SEQAN_ASSERT
* @headerfile <seqan/basic.h>
* @brief Test that the given expression can be coerced to <tt>true</tt>.
*
* @signature SEQAN_ASSERT(expression);
* @signature SEQAN_ASSERT_MSG(expression, message[, parameters]);
*
* @param[in] expression An expression to check for being true.
* @param[in] message A format string.
* @param[in] parameters An optional list of parameters.
*
* @section Remarks
*
* The main advantage of this macro is that it prints the values of its argument on failures. Note that the
* <tt>operator<<</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call.
*
* See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT(0); // will fail
* SEQAN_ASSERT(1); // will run through
* SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message.
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_NOT
* @headerfile <seqan/basic.h>
* @brief Test that the given expression can be coerced to <tt>false</tt>.
*
* @signature SEQAN_ASSERT_NOT(expression)
* @signature SEQAN_ASSERT_NOT_MSG(expression, message[, parameters])
*
* @param[in] expression An expression to check for being false.
* @param[in] message A format string.
* @param[in] parameters An optional list of parameters.
*
* @section Remarks
*
* The main advantage of this macro is that it prints the values of its argument on failures. Note that the
* <tt>operator<<</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call.
*
* See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_NOT(0); // will run through
* SEQAN_ASSERT_NOT(1); // will fail
* SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_EQ
* @headerfile <seqan/basic.h>
* @brief Test that two given expressions are equal, as defined by the matching call to the <tt>operator==(,)</tt>.
* @signature SEQAN_ASSERT_EQ(expression1, expression2);
* @signature SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters]);
*
* @param[in] expression1 The first expression.
* @param[in] expression2 The second expression.
* @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message
* on failure.
* @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string
* <tt>comment</tt>.
*
* The main advantage of this macro is that it prints the values of its argument on failures. Note that the
* <tt>operator<<</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call.
*
* See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_EQ(0, false); // will run through
* SEQAN_ASSERT_EQ(1, false); // will fail
* SEQAN_ASSERT_EQ(1, "foo"); // will not compile
* SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_NEQ
* @headerfile <seqan/basic.h>
* @brief Test that two given expressions are not equal, as defined by the matching call to the <tt>operator!=(,)</tt>.
*
* @signature SEQAN_ASSERT_NEQ(expression1, expression2);
* @signature SEQAN_ASSERT_NEQ_MSG(expression1, expression2, comment[, parameters]);
*
* @param[in] expression1 The first expression.
* @param[in] expression2 The second expression.
* @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message
* on failure.
* @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string
* <tt>comment</tt>.
*
* The main advantage of this macro is that it prints the values of its argument on failures. Note that the
* <tt>operator<<</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call.
*
* See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_NEQ(0, false); // will fail
* SEQAN_ASSERT_NEQ(1, false); // will run through
* SEQAN_ASSERT_NEQ(1, "foo"); // will not compile
* SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_LT
* @headerfile <seqan/basic.h>
* @brief Test that the two given expressions are in the less-than relation as defined by the matching call to
* operator<(,).
*
* @signature SEQAN_ASSERT_LT(expression1, expression2);
* @signature SEQAN_ASSERT_LT_MSG(expression1, expression2, comment[, parameters]);
*
* @param[in] expression1 The first expression.
* @param[in] expression2 The second expression.
* @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message
* on failure.
* @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string
* <tt>comment</tt>.
*
* The main advantage of this macro is that it prints the values of its argument on failures. Note that the
* <tt>operator<<</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call.
*
* See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_LT(0, 1); // will run through
* SEQAN_ASSERT_LT(1, 1); // will not run through
* SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_LEQ
*
* @brief Test that the two given expressions are in the less-than-or-equal
* relation as defined by the matching call to operator<=(,).
*
* @signature SEQAN_ASSERT_LEQ(expression1, expression2)
* @signature SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[,
* parameters])
*
* @param[in] expression1 The first expression.
* @param[in] expression2 The second expression.
* @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message
* on failure.
* @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string
* <tt>comment</tt>.
*
* The main advantage of this macro is that it prints the values of its argument
* on failures. Note that the <tt>operator<<</tt> to the type of
* <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT
* call.
*
* See SEQAN_CHECK and SEQAN_FAIL for
* (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_LEQ(1, 1); // will run through
* SEQAN_ASSERT_LEQ(1, 2); // will not run through
* SEQAN_ASSERT_LEQ_MSG(1, 2, "msg"); // will fail with message
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_GT
*
* @brief Test that the two given expressions are in the greater-than relation
* as defined by the matching call to operator>(,).
*
* @signature SEQAN_ASSERT_GT(expression1, expression2);
* @signature SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters]);
*
* @param[in] expression1 The first expression.
* @param[in] expression2 The second expression.
* @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message
* on failure.
* @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string
* <tt>comment</tt>.
*
* The main advantage of this macro is that it prints the values of its argument
* on failures. Note that the <tt>operator<<</tt> to the type of
* <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT
* call.
*
* See SEQAN_CHECK and SEQAN_FAIL for
* (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_GT(2, 1); // will run through
* SEQAN_ASSERT_GT(1, 1); // will not run through
* SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_GEQ
*
* @brief Test that the two given expressions are in the greater-than-or-equal
* relation as defined by the matching call to operator>=(,).
*
* @signature SEQAN_ASSERT_GEQ(expression1, expression2);
* @signature SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters]);
*
* @param[in] expression1 The first expression.
* @param[in] expression2 The second expression.
* @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message
* on failure.
* @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string
* <tt>comment</tt>.
*
* The main advantage of this macro is that it prints the values of its argument on failures. Note that the
* <tt>operator<<</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call.
*
* See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_GEQ(1, 1); // will run through
* SEQAN_ASSERT_GEQ(0, 1); // will not run through
* SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message
* @endcode
*/
/*!
* @macro AssertMacros#SEQAN_ASSERT_IN_DELTA
*
* @brief Test that a value <tt>y</tt> lies within an <tt>delta</tt> environment of a value <tt>x</tt>.
*
* @signature SEQAN_ASSERT_IN_DELTA(x, y, delta);
* @signature SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters]);
*
* @param[in] x The value to center the environment in.
* @param[in] y The value to check whether it falls within the environment.
* @param[in] delta The environment size.
* @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message
* on failure.
* @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string
* <tt>comment</tt>.
*
* The main advantage of this macro is that it prints the values of its argument on failures. Note that the
* <tt>operator<<</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression
* parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call.
*
* See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings.
*
* @section Examples
*
* @code{.cpp}
* SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through
* SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail
* SEQAN_ASSERT_IN_DELTA(1, "foo"); // will not compile
* SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message
* @endcode
*/
// Force a test failure.
// Unconditionally reports a failure at the call site (file/line captured via
// __FILE__/__LINE__) and then aborts the current test via fail().
//
// Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos);
#define SEQAN_ASSERT_FAIL(...) \
do { \
::seqan::ClassTest::forceFail(__FILE__, __LINE__, \
__VA_ARGS__); \
::seqan::ClassTest::fail(); \
} while (false)
// Equality assertion without a comment.
//
// Usage: SEQAN_ASSERT_EQ(4, 4);
#define SEQAN_ASSERT_EQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Equality assertion with a comment.
//
// Usage: SEQAN_ASSERT_EQ_MSG(4, 4, "msg");
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// In-delta-environment assertion without a comment.
//
// Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1);
#define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \
do { \
if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
(_arg3), # _arg3)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// In-delta-environment assertion with a comment.
//
// Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1");
#define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \
do { \
if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
(_arg3), # _arg3, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Inequality assertion without a comment.
//
// Usage: SEQAN_ASSERT_NEQ(4, 5);
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Inequality assertion with a comment.
//
// Usage: SEQAN_ASSERT_NEQ_MSG(4, 5, "msg");
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than-or-equal assertion without a comment.
// Usage: SEQAN_ASSERT_LEQ(4, 5);
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than-or-equal assertion with a comment.
// Usage: SEQAN_ASSERT_LEQ_MSG(4, 5, "msg");
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than assertion without a comment.
// Usage: SEQAN_ASSERT_LT(4, 5);
#define SEQAN_ASSERT_LT(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than assertion with a comment.
// Usage: SEQAN_ASSERT_LT_MSG(4, 5, "msg");
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than-or-equal assertion without a comment.
// Usage: SEQAN_ASSERT_GEQ(5, 4);
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than-or-equal assertion with a comment.
// Usage: SEQAN_ASSERT_GEQ_MSG(5, 4, "msg");
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than assertion without a comment.
// Usage: SEQAN_ASSERT_GT(5, 4);
#define SEQAN_ASSERT_GT(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than assertion with a comment.
// Usage: SEQAN_ASSERT_GT_MSG(5, 4, "msg");
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.;
// Trueness assertion without a comment.
//
// Usage: SEQAN_ASSERT(false);
#define SEQAN_ASSERT(_arg1) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.;
// Trueness assertion with a comment.
// Usage: SEQAN_ASSERT_MSG(false, "msg");
#define SEQAN_ASSERT_MSG(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Falseness assertion without a comment.
//
// Usage: SEQAN_ASSERT_NOT(false);
#define SEQAN_ASSERT_NOT(_arg1) \
do { \
if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
(_arg1), # _arg1)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Falseness assertion with a comment.
// Usage: SEQAN_ASSERT_NOT_MSG(false, "msg");
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
#elif SEQAN_ENABLE_DEBUG && defined(__CUDA_ARCH__)
// CUDA device code: the stream-based reporting machinery above is not
// available on the device, so map every SeqAn assertion to plain assert().
// The _MSG variants discard their message arguments.
#define SEQAN_ASSERT_EQ(_arg1, _arg2) do { assert(_arg1 == _arg2); } while (false)
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 == _arg2); } while (false)
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) do { assert(_arg1 != _arg2); } while (false)
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 != _arg2); } while (false)
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) do { assert(_arg1 <= _arg2); } while (false)
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 <= _arg2); } while (false)
#define SEQAN_ASSERT_LT(_arg1, _arg2) do { assert(_arg1 < _arg2); } while (false)
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do { assert(_arg1 < _arg2); } while (false)
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) do { assert(_arg1 >= _arg2); } while (false)
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 >= _arg2); } while (false)
#define SEQAN_ASSERT_GT(_arg1, _arg2) do { assert(_arg1 > _arg2); } while (false)
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do { assert(_arg1 > _arg2); } while (false)
#define SEQAN_ASSERT(_arg1) do { assert(_arg1); } while (false)
#define SEQAN_ASSERT_MSG(_arg1, ...) do { assert(_arg1); } while (false)
#define SEQAN_ASSERT_NOT(_arg1) do { assert(!_arg1); } while (false)
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do { assert(!_arg1); } while (false)
#define SEQAN_ASSERT_FAIL(...) do { assert(false); } while (false)
#else
// Debugging disabled: every assertion expands to an empty statement, so the
// asserted expressions are NOT evaluated (avoid side effects in assertions).
#define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT(_arg1) do {} while (false)
#define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_NOT(_arg1) do {} while (false)
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_FAIL(...) do {} while (false)
#endif // #if defined(SEQAN_ENABLE_DEBUG) && !defined(__CUDA_ARCH__)
#else // no variadic macros
#if SEQAN_ENABLE_DEBUG
// Non-variadic-macro fallback (pre-VS2005 compilers): the assertion macros
// are provided as inline/template functions instead. File/line information
// is unavailable here, so "" and 0 are passed in its place.
inline void SEQAN_ASSERT_FAIL(const char * comment, ...)
{
va_list args;
va_start(args, comment);
::seqan::ClassTest::vforceFail("", 0, comment, args);
::seqan::ClassTest::fail();
// NOTE(review): if fail() does not return (e.g. aborts), va_end is never
// reached -- confirm fail()'s behavior before relying on cleanup here.
va_end(args);
}
// Assert that _arg2 lies within _arg3 of _arg1 (no message variant).
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3)
{
if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, ""))
::seqan::ClassTest::fail();
}
// In-delta assertion with a printf-style message.
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Equality assertion (no message variant).
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
// Equality assertion with a printf-style message.
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Inequality assertion (no message variant), non-variadic-macro fallback.
// FIX: pass the line-number argument (0) after the file argument, exactly as
// every sibling in this branch does (testEqual("", 0, ...), testLeq("", 0,
// ...), ...) and as the variadic macros do with __FILE__, __LINE__. The
// original calls omitted it, so the wrong overload (or none) was selected.
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testNotEqual("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
// Inequality assertion with a printf-style message (same fix applied).
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestNotEqual("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Less-than-or-equal assertion (no message variant).
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
// Less-than-or-equal assertion with a printf-style message.
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Less-than assertion (no message variant).
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
// Less-than assertion with a printf-style message.
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Greater-than-or-equal assertion (no message variant).
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
// Greater-than-or-equal assertion with a printf-style message.
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Greater-than assertion (no message variant).
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
// Greater-than assertion with a printf-style message.
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Trueness assertion (no message variant).
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1)
{
if (!::seqan::ClassTest::testTrue("", 0, _arg1, ""))
::seqan::ClassTest::fail();
}
// Trueness assertion with a printf-style message.
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
// Falseness assertion (no message variant).
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1)
{
if (!::seqan::ClassTest::testFalse("", 0, _arg1, ""))
::seqan::ClassTest::fail();
}
// Falseness assertion with a printf-style message.
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
#else // #if SEQAN_ENABLE_DEBUG
// Debugging disabled (non-variadic-macro branch): all assertion helpers
// become empty functions so call sites still compile; arguments ARE still
// evaluated here, unlike the macro no-op branch above.
inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) {}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // no variadic macros
// Returns a string (of type char*) with the path to the called binary.
//
// Use this to locate files relative to the test binary.
// NOTE: expands to ::seqan::ClassTest::StaticData::basePath() at each use.
#define SEQAN_PROGRAM_PATH \
::seqan::ClassTest::StaticData::basePath()
/*!
* @macro SEQAN_PATH_TO_ROOT
* @headerfile <seqan/basic.h>
* @brief Return path to the checkout root directory.
*
* @signature TCharPtr SEQAN_PATH_TO_ROOT()
*
* @return TCharPtr <tt>char const *</tt>, string with the path to the parent directory of the tests directory.
*
* @section Examples
*
* @code{.cpp}
* CharString buffer = SEQAN_PATH_TO_ROOT();
* append(buffer, "/tests/files/example.txt");
*
* FILE *f = fopen(toCString(buffer), "w");
* fprintf(f, "Test Data");
* fclose(f);
* @endcode
*
* @deprecated Unsafe.
* @see getAbsolutePath
* @see SEQAN_TEMP_FILENAME
*/
// TODO(holtgrew): Subject to change with restructuring.
// Returns a const char * string with the path to the projects directory.
// Deprecated (see the documentation block above); prefer getAbsolutePath().
#define SEQAN_PATH_TO_ROOT() \
::seqan::ClassTest::StaticData::pathToRoot()
// Returns the POSIX int file handle to an open file.
// TODO(holtgrewe): Uncomment if openTempFile has been implemented.
// #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile())
/*!
* @macro SEQAN_TEMP_FILENAME
* @headerfile <seqan/basic.h>
* @brief Generates the name to a temporary file.
*
* @signature TCharType SEQAN_TEMP_FILENAME();
*
* @return TCharType <tt>char const *</tt>, string with the path to a temporary file.
*
* @section Remarks
*
* The pointed to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you
* need it.
*
* @section Examples
*
* @code{.cpp}
* const char *p = SEQAN_TEMP_FILENAME();
* buffer char tempFilename[1000];
* strcpy(tempFilename, p);
* FILE *f = fopen(tempFilename, "w");
* fprintf(f, "Test Data");
* fclose(f);
* @endcode
* @see SEQAN_PATH_TO_ROOT
*/
// Returns a temporary filename.
// The returned buffer is overwritten by the next call (see the documentation
// block above) -- copy the string out if you need to keep it.
#define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName())
#if SEQAN_ENABLE_CHECKPOINTS
// Create a check point at the point where the macro is placed.
// TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent.
#define SEQAN_CHECKPOINT \
::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__);
// Call the check point verification code for the given file.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
::seqan::ClassTest::verifyCheckPoints(filename)
#else // #if SEQAN_ENABLE_CHECKPOINTS
// Checkpoints disabled: SEQAN_CHECKPOINT expands to nothing.
#define SEQAN_CHECKPOINT
// If checkpoints are to be verified if testing is disabled then print
// a warning.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
do { \
fprintf(stderr, ("WARNING: Check point verification is " \
"disabled. Trying to verify %s from %s:%d.\n"), \
filename, __FILE__, __LINE__); \
} while (false)
#endif // #if SEQAN_ENABLE_CHECKPOINTS
#if !SEQAN_ENABLE_TESTING
// Testing disabled: provide minimal stand-ins so test sources still build;
// SEQAN_BEGIN_TESTSUITE warns loudly that it was used with testing off.
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
int main(int argc, char ** argv) { \
(void) argc; \
(void) argv; \
fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n");
#define SEQAN_END_TESTSUITE \
return 0; \
}
#define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false)
#define SEQAN_SKIP_TEST do {} while (false)
#endif // #if !SEQAN_ENABLE_TESTING
// ----------------------------------------------------------------------------
// Function getAbsolutePath()
// ----------------------------------------------------------------------------
/*!
* @fn getAbsolutePath
* @headerfile <seqan/basic.h>
* @brief Returns absolute path for a filename within the source repository.
*
* @signature std::string getAbsolutePath(const char * filename)
*
* @return <tt>std::string</tt>, absolute path for a filename within the source repository.
*/
// Build an absolute path for a file inside the source repository by joining
// the checkout root (SEQAN_PATH_TO_ROOT()) and the given relative path.
inline std::string getAbsolutePath(const char * path)
{
    std::string absPath(SEQAN_PATH_TO_ROOT());
    absPath += '/';
    absPath += path;
    return absPath;
}
} // namespace seqan
#endif // SEQAN_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
/* ========================================================================== */
/* === Supernodal/t_cholmod_super_numeric =================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/Supernodal Module. Copyright (C) 2005-2012, Timothy A. Davis
* http://www.suitesparse.com
* -------------------------------------------------------------------------- */
/* Template routine for cholmod_super_numeric. All xtypes supported, except
* that a zomplex A and F result in a complex L (there is no supernodal
* zomplex L).
*/
/* ========================================================================== */
/* === complex arithmetic =================================================== */
/* ========================================================================== */
#include "cholmod_template.h"
/* Reset the per-xtype entry macros before redefining them for this
 * instantiation of the template (REAL / COMPLEX / ZOMPLEX). */
#undef L_ENTRY
#undef L_CLEAR
#undef L_ASSIGN
#undef L_MULTADD
#undef L_ASSEMBLE
#undef L_ASSEMBLESUB
#ifdef REAL
/* -------------------------------------------------------------------------- */
/* A, F, and L are all real */
/* -------------------------------------------------------------------------- */
/* L_ENTRY: number of numeric values stored per matrix entry (1 = real). */
#define L_ENTRY 1
#define L_CLEAR(Lx,p) Lx [p] = 0
#define L_ASSIGN(Lx,q, Ax,Az,p) Lx [q] = Ax [p]
#define L_MULTADD(Lx,q, Ax,Az,p, f) Lx [q] += Ax [p] * f [0]
#define L_ASSEMBLE(Lx,q,b) Lx [q] += b [0]
#define L_ASSEMBLESUB(Lx,q,C,p) Lx [q] -= C [p]
#else
/* -------------------------------------------------------------------------- */
/* A and F are complex or zomplex, L and C are complex */
/* -------------------------------------------------------------------------- */
/* L_ENTRY = 2: real and imaginary parts are interleaved in Lx/C. */
#define L_ENTRY 2
#define L_CLEAR(Lx,p) Lx [2*(p)] = 0 ; Lx [2*(p)+1] = 0
/* Assemble only the real part of b into the (real) diagonal entry. */
#define L_ASSEMBLE(Lx,q,b) Lx [2*(q)] += b [0] ;
#define L_ASSEMBLESUB(Lx,q,C,p) \
Lx [2*(q) ] -= C [2*(p) ] ; \
Lx [2*(q)+1] -= C [2*(p)+1] ;
#ifdef COMPLEX
/* -------------------------------------------------------------------------- */
/* A, F, L, and C are all complex */
/* -------------------------------------------------------------------------- */
#define L_ASSIGN(Lx,q, Ax,Az,p) \
Lx [2*(q) ] = Ax [2*(p) ] ; \
Lx [2*(q)+1] = Ax [2*(p)+1]
/* Complex multiply-add: (Lx[q]) += (Ax[p]) * (f[0] + i*f[1]). */
#define L_MULTADD(Lx,q, Ax,Az,p, f) \
Lx [2*(q) ] += Ax [2*(p) ] * f [0] - Ax [2*(p)+1] * f [1] ; \
Lx [2*(q)+1] += Ax [2*(p)+1] * f [0] + Ax [2*(p) ] * f [1]
#else
/* -------------------------------------------------------------------------- */
/* A and F are zomplex, L and C is complex */
/* -------------------------------------------------------------------------- */
/* Zomplex input: real parts in Ax, imaginary parts in the separate Az. */
#define L_ASSIGN(Lx,q, Ax,Az,p) \
Lx [2*(q) ] = Ax [p] ; \
Lx [2*(q)+1] = Az [p] ;
#define L_MULTADD(Lx,q, Ax,Az,p, f) \
Lx [2*(q) ] += Ax [p] * f [0] - Az [p] * f [1] ; \
Lx [2*(q)+1] += Az [p] * f [0] + Ax [p] * f [1]
#endif
#endif
/* ========================================================================== */
/* === t_cholmod_super_numeric ============================================== */
/* ========================================================================== */
/* This function returns FALSE only if integer overflow occurs in the BLAS.
 * It returns TRUE otherwise whether or not the matrix is positive definite.
 *
 * Numeric supernodal Cholesky factorization: computes L in LL' = A (stype>0)
 * or LL' = A*A' (stype==0, using F = A' or A(:,f)'), one supernode at a time.
 * Cwork is workspace of size L->maxcsize; beta*I (real part only) is added
 * to the diagonal.  On a non-positive-definite matrix, L->minor is set and
 * the offending supernode is repeated/zeroed (see comments below). */
static int TEMPLATE (cholmod_super_numeric)
(
    /* ---- input ---- */
    cholmod_sparse *A,  /* matrix to factorize */
    cholmod_sparse *F,  /* F = A' or A(:,f)' */
    float beta [2],     /* beta*I is added to diagonal of matrix to factorize */
    /* ---- in/out --- */
    cholmod_factor *L,  /* factorization */
    /* -- workspace -- */
    cholmod_dense *Cwork,       /* size (L->maxcsize)-by-1 */
    /* --------------- */
    cholmod_common *Common
)
{
    float one [2], zero [2], tstart ;
    float *Lx, *Ax, *Fx, *Az, *Fz, *C ;
    Int *Super, *Head, *Ls, *Lpi, *Lpx, *Map, *SuperMap, *RelativeMap, *Next,
        *Lpos, *Fp, *Fi, *Fnz, *Ap, *Ai, *Anz, *Iwork, *Next_save, *Lpos_save,
        *Previous;
    Int nsuper, n, j, i, k, s, p, pend, k1, k2, nscol, psi, psx, psend, nsrow,
        pj, d, kd1, kd2, info, ndcol, ndrow, pdi, pdx, pdend, pdi1, pdi2, pdx1,
        ndrow1, ndrow2, px, dancestor, sparent, dnext, nsrow2, ndrow3, pk, pf,
        pfend, stype, Apacked, Fpacked, q, imap, repeat_supernode, nscol2, ss,
        tail, nscol_new = 0;

    /* ---------------------------------------------------------------------- */
    /* declarations for the GPU */
    /* ---------------------------------------------------------------------- */

    /* these variables are not used if the GPU module is not installed */

#ifdef GPU_BLAS
    Int ndescendants, mapCreatedOnGpu, supernodeUsedGPU,
        idescendant, dlarge, dsmall, skips ;
    int iHostBuff, iDevBuff, useGPU, GPUavailable ;
    cholmod_gpu_pointers *gpu_p, gpu_pointer_struct ;
    gpu_p = &gpu_pointer_struct ;
#endif

    /* ---------------------------------------------------------------------- */
    /* guard against integer overflow in the BLAS */
    /* ---------------------------------------------------------------------- */

    /* If integer overflow occurs in the BLAS, Common->status is set to
     * CHOLMOD_TOO_LARGE, and the contents of Lx are undefined. */
    Common->blas_ok = TRUE ;

    /* ---------------------------------------------------------------------- */
    /* get inputs */
    /* ---------------------------------------------------------------------- */

    nsuper = L->nsuper ;
    n = L->n ;

    C = Cwork->x ;      /* workspace of size L->maxcsize */

    one [0] =  1.0 ;    /* ALPHA for *syrk, *herk, *gemm, and *trsm */
    one [1] =  0. ;
    zero [0] = 0. ;     /* BETA for *syrk, *herk, and *gemm */
    zero [1] = 0. ;

    /* Iwork must be of size 2n + 5*nsuper, allocated in the caller,
     * cholmod_super_numeric.  The memory cannot be allocated here because the
     * cholmod_super_numeric initializes SuperMap, and cholmod_allocate_work
     * does not preserve existing workspace if the space needs to be increase
     * in size. */

    /* allocate integer workspace */
    Iwork = Common->Iwork ;
    SuperMap    = Iwork ;                                   /* size n (i/i/l) */
    RelativeMap = Iwork + n ;                               /* size n (i/i/l) */
    Next        = Iwork + 2*((size_t) n) ;                  /* size nsuper*/
    Lpos        = Iwork + 2*((size_t) n) + nsuper ;         /* size nsuper*/
    Next_save   = Iwork + 2*((size_t) n) + 2*((size_t) nsuper) ;/* size nsuper*/
    Lpos_save   = Iwork + 2*((size_t) n) + 3*((size_t) nsuper) ;/* size nsuper*/
    Previous    = Iwork + 2*((size_t) n) + 4*((size_t) nsuper) ;/* size nsuper*/

    Map  = Common->Flag ;   /* size n, use Flag as workspace for Map array */
    Head = Common->Head ;   /* size n+1, only Head [0..nsuper-1] used */

    Ls = L->s ;
    Lpi = L->pi ;
    Lpx = L->px ;

    Super = L->super ;

    Lx = L->x ;

#ifdef GPU_BLAS
    /* local copy of useGPU */
    if ( (Common->useGPU == 1) && L->useGPU)
    {
        /* Initialize the GPU.  If not found, don't use it. */
        useGPU = TEMPLATE2 (CHOLMOD (gpu_init))
            (C, L, Common, nsuper, n, Lpi[nsuper]-Lpi[0], gpu_p) ;
    }
    else
    {
        useGPU = 0;
    }
    /* fprintf (stderr, "local useGPU %d\n", useGPU) ; */
#endif

#ifndef NTIMER
    /* clear GPU / CPU statistics */
    Common->CHOLMOD_CPU_GEMM_CALLS  = 0 ;
    Common->CHOLMOD_CPU_SYRK_CALLS  = 0 ;
    Common->CHOLMOD_CPU_TRSM_CALLS  = 0 ;
    Common->CHOLMOD_CPU_POTRF_CALLS = 0 ;
    Common->CHOLMOD_GPU_GEMM_CALLS  = 0 ;
    Common->CHOLMOD_GPU_SYRK_CALLS  = 0 ;
    Common->CHOLMOD_GPU_TRSM_CALLS  = 0 ;
    Common->CHOLMOD_GPU_POTRF_CALLS = 0 ;
    Common->CHOLMOD_CPU_GEMM_TIME   = 0 ;
    Common->CHOLMOD_CPU_SYRK_TIME   = 0 ;
    Common->CHOLMOD_CPU_TRSM_TIME   = 0 ;
    Common->CHOLMOD_CPU_POTRF_TIME  = 0 ;
    Common->CHOLMOD_GPU_GEMM_TIME   = 0 ;
    Common->CHOLMOD_GPU_SYRK_TIME   = 0 ;
    Common->CHOLMOD_GPU_TRSM_TIME   = 0 ;
    Common->CHOLMOD_GPU_POTRF_TIME  = 0 ;
    Common->CHOLMOD_ASSEMBLE_TIME   = 0 ;
    Common->CHOLMOD_ASSEMBLE_TIME2  = 0 ;
#endif

    stype = A->stype ;

    if (stype != 0)
    {
        /* F not accessed */
        Fp = NULL ;
        Fi = NULL ;
        Fx = NULL ;
        Fz = NULL ;
        Fnz = NULL ;
        Fpacked = TRUE ;
    }
    else
    {
        Fp = F->p ;
        Fi = F->i ;
        Fx = F->x ;
        Fz = F->z ;
        Fnz = F->nz ;
        Fpacked = F->packed ;
    }

    Ap = A->p ;
    Ai = A->i ;
    Ax = A->x ;
    Az = A->z ;
    Anz = A->nz ;
    Apacked = A->packed ;

    /* clear the Map so that changes in the pattern of A can be detected */

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    if ( n > 128 ) schedule (static)

    for (i = 0 ; i < n ; i++)
    {
        Map [i] = EMPTY ;
    }

    /* If the matrix is not positive definite, the supernode s containing the
     * first zero or negative diagonal entry of L is repeated (but factorized
     * only up to just before the problematic diagonal entry). The purpose is
     * to provide MATLAB with [R,p]=chol(A); columns 1 to p-1 of L=R' are
     * required, where L(p,p) is the problematic diagonal entry.  The
     * repeat_supernode flag tells us whether this is the repeated supernode.
     * Once supernode s is repeated, the factorization is terminated. */
    repeat_supernode = FALSE ;

#ifdef GPU_BLAS
    if ( useGPU )
    {
        /* Case of GPU, zero all supernodes at one time for better performance*/
        TEMPLATE2 (CHOLMOD (gpu_clear_memory))(Lx, L->xsize,
            CHOLMOD_OMP_NUM_THREADS);
    }
#endif

    /* ---------------------------------------------------------------------- */
    /* supernodal numerical factorization */
    /* ---------------------------------------------------------------------- */

    for (s = 0 ; s < nsuper ; s++)
    {

        /* ------------------------------------------------------------------ */
        /* get the size of supernode s */
        /* ------------------------------------------------------------------ */

        k1 = Super [s] ;            /* s contains columns k1 to k2-1 of L */
        k2 = Super [s+1] ;
        nscol = k2 - k1 ;           /* # of columns in all of s */
        psi = Lpi [s] ;             /* pointer to first row of s in Ls */
        psx = Lpx [s] ;             /* pointer to first row of s in Lx */
        psend = Lpi [s+1] ;         /* pointer just past last row of s in Ls */
        nsrow = psend - psi ;       /* # of rows in all of s */

        PRINT1 (("====================================================\n"
                 "S "ID" k1 "ID" k2 "ID" nsrow "ID" nscol "ID" psi "ID" psend "
                 ""ID" psx "ID"\n", s, k1, k2, nsrow, nscol, psi, psend,
                 psx)) ;

        /* ------------------------------------------------------------------ */
        /* zero the supernode s */
        /* ------------------------------------------------------------------ */

        ASSERT ((size_t) (psx + nsrow*nscol) <= L->xsize) ;

        pend = psx + nsrow * nscol ;        /* s is nsrow-by-nscol */

#ifdef GPU_BLAS
        if ( !useGPU )
#endif
        {
            /* Case of no GPU, zero individual supernodes */

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    schedule (static) if ( pend - psx > 1024 )

            for (p = psx ; p < pend ; p++) {
                L_CLEAR (Lx,p);
            }
        }

        /* ------------------------------------------------------------------ */
        /* construct the scattered Map for supernode s */
        /* ------------------------------------------------------------------ */

        /* If row i is the kth row in s, then Map [i] = k.  Similarly, if
         * column j is the kth column in s, then  Map [j] = k. */

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    if ( nsrow > 128 )

        for (k = 0 ; k < nsrow ; k++)
        {
            PRINT1 (("  "ID" map "ID"\n", Ls [psi+k], k)) ;
            Map [Ls [psi + k]] = k ;
        }

        /* ------------------------------------------------------------------ */
        /* when using GPU, reorder supernodes by levels.*/
        /* (all supernodes in a level are independent) */
        /* ------------------------------------------------------------------ */

#ifdef GPU_BLAS
        if ( useGPU )
        {
            TEMPLATE2 (CHOLMOD (gpu_reorder_descendants))
                ( Common, Super, &s, Lpi, Lpos, Head, Next, Previous,
                  &ndescendants, &tail, &mapCreatedOnGpu, gpu_p ) ;
        }
#endif

        /* ------------------------------------------------------------------ */
        /* copy matrix into supernode s (lower triangular part only) */
        /* ------------------------------------------------------------------ */

        pk = psx ;

#pragma omp parallel for private ( p, pend, pfend, pf, i, j, imap, q )  \
    num_threads(CHOLMOD_OMP_NUM_THREADS) if ( k2-k1 > 64 )

        for (k = k1 ; k < k2 ; k++)
        {
            if (stype != 0)
            {
                /* copy the kth column of A into the supernode */
                p = Ap [k] ;
                pend = (Apacked) ? (Ap [k+1]) : (p + Anz [k]) ;
                for ( ; p < pend ; p++)
                {
                    /* row i of L is located in row Map [i] of s */
                    i = Ai [p] ;
                    if (i >= k)
                    {
                        /* This test is here simply to avoid a segfault.  If
                         * the test is false, the numeric factorization of A
                         * is undefined.  It does not detect all invalid
                         * entries, only some of them (when debugging is
                         * enabled, and Map is cleared after each step, then
                         * all entries not in the pattern of L are detected). */
                        imap = Map [i] ;
                        if (imap >= 0 && imap < nsrow)
                        {
                            /* Lx [Map [i] + pk] = Ax [p] ; */
                            L_ASSIGN (Lx,(imap+(psx+(k-k1)*nsrow)), Ax,Az,p) ;
                        }
                    }
                }
            }
            else
            {
                float fjk[2];
                /* copy the kth column of A*F into the supernode */
                pf = Fp [k] ;
                /* FIX: end of column k of F is pf + Fnz [k] in the unpacked
                 * case.  The previous code used "p + Fnz [k]", but p is an
                 * OpenMP-private variable that has not been assigned yet at
                 * this point in the iteration, so its value is indeterminate
                 * (undefined behavior when F is unpacked). */
                pfend = (Fpacked) ? (Fp [k+1]) : (pf + Fnz [k]) ;
                for ( ; pf < pfend ; pf++)
                {
                    j = Fi [pf] ;

                    /* fjk = Fx [pf] ; */
                    L_ASSIGN (fjk,0, Fx,Fz,pf) ;

                    p = Ap [j] ;
                    pend = (Apacked) ? (Ap [j+1]) : (p + Anz [j]) ;
                    for ( ; p < pend ; p++)
                    {
                        i = Ai [p] ;
                        if (i >= k)
                        {
                            /* See the discussion of imap above. */
                            imap = Map [i] ;
                            if (imap >= 0 && imap < nsrow)
                            {
                                /* Lx [Map [i] + pk] += Ax [p] * fjk ; */
                                L_MULTADD (Lx,(imap+(psx+(k-k1)*nsrow)),
                                           Ax,Az,p, fjk) ;
                            }
                        }
                    }
                }
            }
        }

        /* add beta to the diagonal of the supernode, if nonzero */
        if (beta [0] != 0.0)
        {
            /* note that only the real part of beta is used */
            pk = psx ;
            for (k = k1 ; k < k2 ; k++)
            {
                /* Lx [pk] += beta [0] ; */
                L_ASSEMBLE (Lx,pk, beta) ;
                pk += nsrow + 1 ;       /* advance to the next diagonal entry */
            }
        }

        PRINT1 (("Supernode with just A: repeat: "ID"\n", repeat_supernode)) ;
        DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
                                    Common)) ;
        PRINT1 (("\n\n")) ;

        /* ------------------------------------------------------------------ */
        /* save/restore the list of supernodes */
        /* ------------------------------------------------------------------ */

        if (!repeat_supernode)
        {
            /* Save the list of pending descendants in case s is not positive
             * definite.  Also save Lpos for each descendant d, so that we can
             * find which part of d is used to update s. */
            for (d = Head [s] ; d != EMPTY ; d = Next [d])
            {
                Lpos_save [d] = Lpos [d] ;
                Next_save [d] = Next [d] ;
            }
        }
        else
        {
            for (d = Head [s] ; d != EMPTY ; d = Next [d])
            {
                Lpos [d] = Lpos_save [d] ;
                Next [d] = Next_save [d] ;
            }
        }

        /* ------------------------------------------------------------------ */
        /* update supernode s with each pending descendant d */
        /* ------------------------------------------------------------------ */

#ifndef NDEBUG
        for (d = Head [s] ; d != EMPTY ; d = Next [d])
        {
            PRINT1 (("\nWill update "ID" with Child: "ID"\n", s, d)) ;
            DEBUG (CHOLMOD(dump_super) (d, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
                                        Common)) ;
        }
        PRINT1 (("\nNow factorizing supernode "ID":\n", s)) ;
#endif

#ifdef GPU_BLAS
        /* initialize the buffer counter */
        if ( useGPU ) {
            Common->ibuffer = 0;
            supernodeUsedGPU = 0;
            idescendant = 0;
            d = Head[s];
            dnext = d;
            dlarge = Next[d];
            dsmall = tail;
            GPUavailable = 1;
            skips = 0;
        }
        else
        {
            dnext = Head[s];
        }
#else
        /* GPU module not installed */
        dnext = Head[s];
#endif

        while

#ifdef GPU_BLAS
            ( (!useGPU && (dnext != EMPTY))
              || (useGPU && (idescendant < ndescendants)))
#else
            ( dnext != EMPTY )
#endif
        {

#ifdef GPU_BLAS
            if ( useGPU ) {

                /* Conditionally select the next descendant supernode to
                 *  assemble.
                 *   + first, select the largest descendant
                 *   + subsequently, if gpu host buffers are available, select
                 *     the largest remaining descendant for assembly on the GPU
                 *   + otherwise select the smallest remaining descendant for
                 *     assembly on the CPU
                 *
                 * The objective is to keep the GPU busy assembling the largest
                 * descendants, and simultaneously keep the CPU busy assembling
                 * the smallest descendants.
                 *
                 * As this is called for every descendent supernode, moving
                 * this code to t_cholmod_gpu incurs substantial overhead -
                 * ~20 GF/s on audikw_1 - so it is being left here.
                 */

                iHostBuff =
                    (Common->ibuffer) % CHOLMOD_HOST_SUPERNODE_BUFFERS;
                cudaError_t cuErr;

                if ( idescendant > 0 )  {
                    if ( GPUavailable == -1 || skips > 0) {
                        d = dsmall;
                        dsmall = Previous[dsmall];
                        skips--;
                    }
                    else {
                        cuErr = cudaEventQuery
                            ( Common->updateCBuffersFree[iHostBuff] );
                        if ( cuErr == cudaSuccess ) {
                            /* buffers are available, so assemble a large
                             * descendant (anticipating that this will be
                             * assembled on the GPU) */
                            d = dlarge;
                            dlarge = Next[dlarge];
                            GPUavailable = 1;
                            skips = 0;
                        }
                        else  {
                            /* buffers are not available, so the GPU is busy,
                             * so assemble a small descendant (anticipating
                             * that it will be assembled on the host) */
                            d = dsmall;
                            dsmall = Previous[dsmall];
                            GPUavailable = 0;

                            /* if the GPUs are busy, then do this many
                             * supernodes on the CPU before querying GPUs
                             * again. */
                            skips = CHOLMOD_GPU_SKIP;
                        }
                    }
                }
                idescendant++;
            }
            else
            {
                d = dnext;
            }
#else
            /* GPU module not installed at compile time */
            d = dnext ;
#endif

            /* -------------------------------------------------------------- */
            /* get the size of supernode d */
            /* -------------------------------------------------------------- */

            kd1 = Super [d] ;       /* d contains cols kd1 to kd2-1 of L */
            kd2 = Super [d+1] ;
            ndcol = kd2 - kd1 ;     /* # of columns in all of d */
            pdi = Lpi [d] ;         /* pointer to first row of d in Ls */
            pdx = Lpx [d] ;         /* pointer to first row of d in Lx */
            pdend = Lpi [d+1] ;     /* pointer just past last row of d in Ls */
            ndrow = pdend - pdi ;   /* # rows in all of d */

            PRINT1 (("Child: ")) ;
            DEBUG (CHOLMOD(dump_super) (d, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
                                        Common)) ;

            /* -------------------------------------------------------------- */
            /* find the range of rows of d that affect rows k1 to k2-1 of s */
            /* -------------------------------------------------------------- */

            p = Lpos [d] ;          /* offset of 1st row of d affecting s */
            pdi1 = pdi + p ;        /* ptr to 1st row of d affecting s in Ls */
            pdx1 = pdx + p ;        /* ptr to 1st row of d affecting s in Lx */

            /* there must be at least one row remaining in d to update s */
            ASSERT (pdi1 < pdend) ;
            PRINT1 (("Lpos[d] "ID" pdi1 "ID" Ls[pdi1] "ID"\n",
                     Lpos[d], pdi1, Ls [pdi1])) ;
            ASSERT (Ls [pdi1] >= k1 && Ls [pdi1] < k2) ;

            for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; pdi2++) ;
            ndrow1 = pdi2 - pdi1 ;      /* # rows in first part of d */
            ndrow2 = pdend - pdi1 ;     /* # rows in remaining d */

            /* rows Ls [pdi1 ... pdi2-1] are in the range k1 to k2-1.  Since d
             * affects s, this set cannot be empty. */
            ASSERT (pdi1 < pdi2 && pdi2 <= pdend) ;
            PRINT1 (("ndrow1 "ID" ndrow2 "ID"\n", ndrow1, ndrow2)) ;
            DEBUG (for (p = pdi1 ; p < pdi2 ; p++)
                       PRINT1 (("Ls["ID"] "ID"\n", p, Ls[p]))) ;

            /* -------------------------------------------------------------- */
            /* construct the update matrix C for this supernode d */
            /* -------------------------------------------------------------- */

            /* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except
             * that k1:n-1 refers to all of the rows in L, but many of the
             * rows are all zero.  Supernode d holds columns kd1 to kd2-1 of L.
             * Nonzero rows in the range k1:k2-1 are in the list
             * Ls [pdi1 ... pdi2-1], of size ndrow1.  Nonzero rows in the range
             * k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2.  Let
             * L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let
             * L2 = L (Ls [pdi2 ... pdend],  kd1:kd2-1).  C is ndrow2-by-ndrow1.
             * Let C1 be the first ndrow1 rows of C and let C2 be the last
             * ndrow2-ndrow1 rows of C.  Only the lower triangular part of C1
             * needs to be computed since C1 is symmetric.
             */

            /* maxcsize is the largest size of C for all pairs (d,s) */
            ASSERT (ndrow2 * ndrow1 <= ((Int) L->maxcsize)) ;

            /* compute leading ndrow1-by-ndrow1 lower triangular block of C,
             * C1 = L1*L1' */

            ndrow3 = ndrow2 - ndrow1 ;  /* number of rows of C2 */
            ASSERT (ndrow3 >= 0) ;

#ifdef GPU_BLAS
            if ( useGPU ) {
                /* set up GPU to assemble new supernode */
                if ( GPUavailable == 1) {
                    if ( ndrow2 * L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
                         ndcol * L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
                        if ( ! mapCreatedOnGpu ) {
                            TEMPLATE2 ( CHOLMOD (gpu_initialize_supernode))
                                ( Common, nscol, nsrow, psi, gpu_p );
                            mapCreatedOnGpu = 1;
                        }
                    }
                    else {
                        /* we've reached the limit of GPU-eligible descendants
                         * flag to stop stop performing cudaEventQueries */
                        GPUavailable = -1;
                    }
                }
            }
#endif

#ifdef GPU_BLAS
            if ( !useGPU
                 || GPUavailable!=1
                 || !TEMPLATE2 (CHOLMOD (gpu_updateC)) (ndrow1, ndrow2, ndrow,
                        ndcol, nsrow, pdx1, pdi1, Lx, C, Common, gpu_p))
#endif
            {
                /* GPU not installed, or not used */
#ifndef NTIMER

                Common->CHOLMOD_CPU_SYRK_CALLS++ ;
                tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
                BLAS_dsyrk ("L", "N",
                    ndrow1, ndcol,              /* N, K: L1 is ndrow1-by-ndcol*/
                    one,                        /* ALPHA:  1 */
                    Lx + L_ENTRY*pdx1, ndrow,   /* A, LDA: L1, ndrow */
                    zero,                       /* BETA:   0 */
                    C, ndrow2) ;                /* C, LDC: C1 */
#else
                BLAS_zherk ("L", "N",
                    ndrow1, ndcol,              /* N, K: L1 is ndrow1-by-ndcol*/
                    one,                        /* ALPHA:  1 */
                    Lx + L_ENTRY*pdx1, ndrow,   /* A, LDA: L1, ndrow */
                    zero,                       /* BETA:   0 */
                    C, ndrow2) ;                /* C, LDC: C1 */
#endif
#ifndef NTIMER
                Common->CHOLMOD_CPU_SYRK_TIME += SuiteSparse_time () - tstart ;
#endif
                /* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C,
                 * C2 = L2*L1' */
                if (ndrow3 > 0)
                {
#ifndef NTIMER
                    Common->CHOLMOD_CPU_GEMM_CALLS++ ;
                    tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
                    BLAS_dgemm ("N", "C",
                        ndrow3, ndrow1, ndcol,          /* M, N, K */
                        one,                            /* ALPHA:  1 */
                        Lx + L_ENTRY*(pdx1 + ndrow1),   /* A, LDA: L2 */
                        ndrow,                          /* ndrow */
                        Lx + L_ENTRY*pdx1,              /* B, LDB: L1 */
                        ndrow,                          /* ndrow */
                        zero,                           /* BETA:   0 */
                        C + L_ENTRY*ndrow1,             /* C, LDC: C2 */
                        ndrow2) ;
#else
                    BLAS_zgemm ("N", "C",
                        ndrow3, ndrow1, ndcol,          /* M, N, K */
                        one,                            /* ALPHA:  1 */
                        Lx + L_ENTRY*(pdx1 + ndrow1),   /* A, LDA: L2 */
                        ndrow,                          /* ndrow */
                        Lx + L_ENTRY*pdx1,              /* B, LDB: L1, ndrow */
                        ndrow,
                        zero,                           /* BETA:   0 */
                        C + L_ENTRY*ndrow1,             /* C, LDC: C2 */
                        ndrow2) ;
#endif
#ifndef NTIMER
                    Common->CHOLMOD_CPU_GEMM_TIME +=
                        SuiteSparse_time () - tstart ;
#endif
                }

                /* ---------------------------------------------------------- */
                /* construct relative map to assemble d into s */
                /* ---------------------------------------------------------- */

                DEBUG (CHOLMOD(dump_real) ("C", C, ndrow2, ndrow1, TRUE,
                                           L_ENTRY, Common)) ;

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    if ( ndrow2 > 64 )

                for (i = 0 ; i < ndrow2 ; i++)
                {
                    RelativeMap [i] = Map [Ls [pdi1 + i]] ;
                    ASSERT (RelativeMap [i] >= 0 && RelativeMap [i] < nsrow) ;
                }

                /* ---------------------------------------------------------- */
                /* assemble C into supernode s using the relative map */
                /* ---------------------------------------------------------- */

#pragma omp parallel for private ( j, i, px, q )                \
    num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndrow1 > 64 )

                for (j = 0 ; j < ndrow1 ; j++)              /* cols k1:k2-1 */
                {
                    ASSERT (RelativeMap [j] == Map [Ls [pdi1 + j]]) ;
                    ASSERT (RelativeMap [j] >= 0 && RelativeMap [j] < nscol) ;
                    px = psx + RelativeMap [j] * nsrow ;
                    for (i = j ; i < ndrow2 ; i++)          /* rows k1:n-1 */
                    {
                        ASSERT (RelativeMap [i] == Map [Ls [pdi1 + i]]) ;
                        ASSERT (RelativeMap [i] >= j && RelativeMap[i] < nsrow);
                        /* Lx [px + RelativeMap [i]] -= C [i + pj] ; */
                        q = px + RelativeMap [i] ;
                        L_ASSEMBLESUB (Lx,q, C, i+ndrow2*j) ;
                    }
                }

            }
#ifdef GPU_BLAS
            else
            {
                supernodeUsedGPU = 1;   /* GPU was used for this supernode*/
                Common->ibuffer++;      /* gpu_updateC is asynchronous, so use
                                         * the next host buffer for the next
                                         * supernode */
                Common->ibuffer = Common->ibuffer%
                    (CHOLMOD_HOST_SUPERNODE_BUFFERS*CHOLMOD_DEVICE_STREAMS);
            }
#endif

            /* -------------------------------------------------------------- */
            /* prepare this supernode d for its next ancestor */
            /* -------------------------------------------------------------- */

            dnext = Next [d] ;

            if (!repeat_supernode)
            {
                /* If node s is being repeated, Head [dancestor] has already
                 * been cleared (set to EMPTY).  It must remain EMPTY.  The
                 * dancestor will not be factorized since the factorization
                 * terminates at node s. */
                Lpos [d] = pdi2 - pdi ;
                if (Lpos [d] < ndrow)
                {
                    dancestor = SuperMap [Ls [pdi2]] ;
                    ASSERT (dancestor > s && dancestor < nsuper) ;
                    /* place d in the link list of its next ancestor */
                    Next [d] = Head [dancestor] ;
                    Head [dancestor] = d ;
                }
            }

        }  /* end of descendant supernode loop */

#ifdef GPU_BLAS
        if ( useGPU ) {
            iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
            iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

            /* combine updates assembled on the GPU with updates
             * assembled on the CPU */
            TEMPLATE2 ( CHOLMOD (gpu_final_assembly ))
                ( Common, Lx, psx, nscol, nsrow, supernodeUsedGPU,
                  &iHostBuff, &iDevBuff, gpu_p );
        }
#endif

        PRINT1 (("\nSupernode with contributions A: repeat: "ID"\n",
                 repeat_supernode)) ;
        DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
                                    Common)) ;
        PRINT1 (("\n\n")) ;

        /* ------------------------------------------------------------------ */
        /* factorize diagonal block of supernode s in LL' */
        /* ------------------------------------------------------------------ */

        /* The current supernode s is ready to factorize.  It has been updated
         * by all descendant supernodes.  Let S = the current supernode, which
         * holds rows k1:n-1 and columns k1:k2-1 of the updated matrix.   It
         * splits into two parts:  the square diagonal block S1, and the
         * rectangular part S2.  Here, S1 is factorized into L1*L1' and
         * overwritten by L1.
         *
         * If supernode s is being repeated, only factorize it up to but not
         * including the column containing the problematic entry.
         */

        nscol2 = (repeat_supernode) ? (nscol_new) : (nscol) ;

#ifdef GPU_BLAS
        if ( !useGPU
             || !supernodeUsedGPU
             || !TEMPLATE2 (CHOLMOD (gpu_lower_potrf))(nscol2, nsrow, psx, Lx,
                                                       &info, Common, gpu_p))
#endif
        {
            /* Note that the GPU will not be used for the triangular solve */
#ifdef GPU_BLAS
            supernodeUsedGPU = 0;
#endif
#ifndef NTIMER
            Common->CHOLMOD_CPU_POTRF_CALLS++ ;
            tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
            LAPACK_dpotrf ("L",
                nscol2,                     /* N: nscol2 */
                Lx + L_ENTRY*psx, nsrow,    /* A, LDA: S1, nsrow */
                info) ;                     /* INFO */
#else
            LAPACK_zpotrf ("L",
                nscol2,                     /* N: nscol2 */
                Lx + L_ENTRY*psx, nsrow,    /* A, LDA: S1, nsrow */
                info) ;                     /* INFO */
#endif
#ifndef NTIMER
            Common->CHOLMOD_CPU_POTRF_TIME += SuiteSparse_time ()- tstart ;
#endif
        }

        /* ------------------------------------------------------------------ */
        /* check if the matrix is not positive definite */
        /* ------------------------------------------------------------------ */

        if (repeat_supernode)
        {
            /* the leading part has been refactorized; it must have succeeded */
            info = 0 ;

            /* zero out the rest of this supernode */
            p = psx + nsrow * nscol_new ;
            pend = psx + nsrow * nscol ;            /* s is nsrow-by-nscol */
            for ( ; p < pend ; p++)
            {
                /* Lx [p] = 0 ; */
                L_CLEAR (Lx,p) ;
            }
        }

        /* info is set to one in LAPACK_*potrf if blas_ok is FALSE.  It is
         * set to zero in dpotrf/zpotrf if the factorization was successful. */
        if (CHECK_BLAS_INT && !Common->blas_ok)
        {
            ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
        }

        if (info != 0)
        {
            /* Matrix is not positive definite.  dpotrf/zpotrf do NOT report an
             * error if the diagonal of L has NaN's, only if it has a zero. */
            if (Common->status == CHOLMOD_OK)
            {
                ERROR (CHOLMOD_NOT_POSDEF, "matrix not positive definite") ;
            }

            /* L->minor is the column of L that contains a zero or negative
             * diagonal term. */
            L->minor = k1 + info - 1 ;

            /* clear the link lists of all subsequent supernodes */
            for (ss = s+1 ; ss < nsuper ; ss++)
            {
                Head [ss] = EMPTY ;
            }

            /* zero this supernode, and all remaining supernodes */
            pend = L->xsize ;
            for (p = psx ; p < pend ; p++)
            {
                /* Lx [p] = 0. ; */
                L_CLEAR (Lx,p) ;
            }

            /* If L is indefinite, it still contains useful information.
             * Supernodes 0 to s-1 are valid, similar to MATLAB [R,p]=chol(A),
             * where the 1-based p is identical to the 0-based L->minor.  Since
             * L->minor is in the current supernode s, it and any columns to the
             * left of it in supernode s are also all zero.  This differs from
             * [R,p]=chol(A), which contains nonzero rows 1 to p-1.  Fix this
             * by setting repeat_supernode to TRUE, and repeating supernode s.
             *
             * If Common->quick_return_if_not_posdef is true, then the entire
             * supernode s is not factorized; it is left as all zero.
             */

            if (info == 1 || Common->quick_return_if_not_posdef)
            {
                /* If the first column of supernode s contains a zero or
                 * negative diagonal entry, then it is already properly set to
                 * zero.  Also, info will be 1 if integer overflow occured in
                 * the BLAS. */
                Head [s] = EMPTY ;
#ifdef GPU_BLAS
                if ( useGPU ) {
                    CHOLMOD (gpu_end) (Common) ;
                }
#endif
                return (Common->status >= CHOLMOD_OK) ;
            }
            else
            {
                /* Repeat supernode s, but only factorize it up to but not
                 * including the column containing the problematic diagonal
                 * entry. */
                repeat_supernode = TRUE ;
                s-- ;
                nscol_new = info - 1 ;
                continue ;
            }
        }

        /* ------------------------------------------------------------------ */
        /* compute the subdiagonal block and prepare supernode for its parent */
        /* ------------------------------------------------------------------ */

        nsrow2 = nsrow - nscol2 ;
        if (nsrow2 > 0)
        {
            /* The current supernode is columns k1 to k2-1 of L.  Let L1 be the
             * diagonal block (factorized by dpotrf/zpotrf above; rows/cols
             * k1:k2-1), and L2 be rows k2:n-1 and columns k1:k2-1 of L.  The
             * triangular system to solve is L2*L1' = S2, where S2 is
             * overwritten with L2.  More precisely, L2 = S2 / L1' in MATLAB
             * notation.
             */

#ifdef GPU_BLAS
            if ( !useGPU
                 || !supernodeUsedGPU
                 || !TEMPLATE2 (CHOLMOD(gpu_triangular_solve))
                        (nsrow2, nscol2, nsrow, psx, Lx, Common, gpu_p))
#endif
            {
#ifndef NTIMER
                Common->CHOLMOD_CPU_TRSM_CALLS++ ;
                tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
                BLAS_dtrsm ("R", "L", "C", "N",
                    nsrow2, nscol2,                 /* M, N */
                    one,                            /* ALPHA: 1 */
                    Lx + L_ENTRY*psx, nsrow,        /* A, LDA: L1, nsrow */
                    Lx + L_ENTRY*(psx + nscol2),    /* B, LDB, L2, nsrow */
                    nsrow) ;
#else
                BLAS_ztrsm ("R", "L", "C", "N",
                    nsrow2, nscol2,                 /* M, N */
                    one,                            /* ALPHA: 1 */
                    Lx + L_ENTRY*psx, nsrow,        /* A, LDA: L1, nsrow */
                    Lx + L_ENTRY*(psx + nscol2),    /* B, LDB, L2, nsrow */
                    nsrow) ;
#endif
#ifndef NTIMER
                Common->CHOLMOD_CPU_TRSM_TIME += SuiteSparse_time () - tstart ;
#endif
            }

            if (CHECK_BLAS_INT && !Common->blas_ok)
            {
                ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
            }

            if (!repeat_supernode)
            {
                /* Lpos [s] is offset of first row of s affecting its parent */
                Lpos [s] = nscol ;
                sparent = SuperMap [Ls [psi + nscol]] ;
                ASSERT (sparent != EMPTY) ;
                ASSERT (Ls [psi + nscol] >= Super [sparent]) ;
                ASSERT (Ls [psi + nscol] <  Super [sparent+1]) ;
                ASSERT (SuperMap [Ls [psi + nscol]] == sparent) ;
                ASSERT (sparent > s && sparent < nsuper) ;
                /* place s in link list of its parent */
                Next [s] = Head [sparent] ;
                Head [sparent] = s ;
            }
        }
        else
        {
#ifdef GPU_BLAS
            TEMPLATE2 ( CHOLMOD (gpu_copy_supernode) )
                ( Common, Lx, psx, nscol, nscol2, nsrow,
                  supernodeUsedGPU, iHostBuff, gpu_p);
#endif
        }

        Head [s] = EMPTY ;  /* link list for supernode s no longer needed */

        /* clear the Map (debugging only, to detect changes in pattern of A) */
        DEBUG (for (k = 0 ; k < nsrow ; k++) Map [Ls [psi + k]] = EMPTY) ;
        DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
                                    Common)) ;

        if (repeat_supernode)
        {
            /* matrix is not positive definite; finished clean-up for supernode
             * containing negative diagonal */
#ifdef GPU_BLAS
            if ( useGPU )
            {
                CHOLMOD (gpu_end) (Common) ;
            }
#endif
            return (Common->status >= CHOLMOD_OK) ;
        }
    }

    /* success; matrix is positive definite */
    L->minor = n ;

#ifdef GPU_BLAS
    if ( useGPU )
    {
        CHOLMOD (gpu_end) (Common) ;
    }
#endif

    return (Common->status >= CHOLMOD_OK) ;
}
#undef PATTERN
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
|
oskar_imager_rotate_coords.c | /*
* Copyright (c) 2016-2017, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "imager/private_imager.h"
#include "imager/oskar_imager.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Applies the imager's 3x3 rotation matrix (h->M, row-major) to each
 * (u,v,w) baseline coordinate: out = M * in, element by element.
 * The precision of uu_in selects the single- or double-precision path;
 * input and output arrays must all share that precision.  Timing is
 * accumulated on h->tmr_rotate. */
void oskar_imager_rotate_coords(oskar_Imager* h, size_t num_coords,
        const oskar_Mem* uu_in, const oskar_Mem* vv_in, const oskar_Mem* ww_in,
        oskar_Mem* uu_out, oskar_Mem* vv_out, oskar_Mem* ww_out)
{
#ifdef OSKAR_OS_WIN
    /* MSVC OpenMP requires a signed loop index. */
    int i;
    const int num = (const int) num_coords;
#else
    size_t i;
    const size_t num = num_coords;
#endif
    const double *M = h->M;
    oskar_timer_resume(h->tmr_rotate);

    /* Hoist the matrix entries out of the loops. */
    const double m00 = M[0], m01 = M[1], m02 = M[2];
    const double m10 = M[3], m11 = M[4], m12 = M[5];
    const double m20 = M[6], m21 = M[7], m22 = M[8];

    if (oskar_mem_precision(uu_in) == OSKAR_SINGLE)
    {
        const float *u_in = (const float*)oskar_mem_void_const(uu_in);
        const float *v_in = (const float*)oskar_mem_void_const(vv_in);
        const float *w_in = (const float*)oskar_mem_void_const(ww_in);
        float *u_out = (float*)oskar_mem_void(uu_out);
        float *v_out = (float*)oskar_mem_void(vv_out);
        float *w_out = (float*)oskar_mem_void(ww_out);
#pragma omp parallel for private(i)
        for (i = 0; i < num; ++i)
        {
            /* Promote to double for the rotation, as in the original. */
            const double u = u_in[i], v = v_in[i], w = w_in[i];
            const double ru = m00 * u + m01 * v + m02 * w;
            const double rv = m10 * u + m11 * v + m12 * w;
            const double rw = m20 * u + m21 * v + m22 * w;
            u_out[i] = ru;
            v_out[i] = rv;
            w_out[i] = rw;
        }
    }
    else
    {
        const double *u_in = (const double*)oskar_mem_void_const(uu_in);
        const double *v_in = (const double*)oskar_mem_void_const(vv_in);
        const double *w_in = (const double*)oskar_mem_void_const(ww_in);
        double *u_out = (double*)oskar_mem_void(uu_out);
        double *v_out = (double*)oskar_mem_void(vv_out);
        double *w_out = (double*)oskar_mem_void(ww_out);
#pragma omp parallel for private(i)
        for (i = 0; i < num; ++i)
        {
            const double u = u_in[i], v = v_in[i], w = w_in[i];
            const double ru = m00 * u + m01 * v + m02 * w;
            const double rv = m10 * u + m11 * v + m12 * w;
            const double rw = m20 * u + m21 * v + m22 * w;
            u_out[i] = ru;
            v_out[i] = rv;
            w_out[i] = rw;
        }
    }
    oskar_timer_pause(h->tmr_rotate);
}
#ifdef __cplusplus
}
#endif
|
gm_order.h | #ifndef GM_ORDER_H
#define GM_ORDER_H
#include <list>
#include "gm_internal.h"
#include "gm_bitmap.h"
template<typename T>
class gm_order
{
public:
    //------------------------------------------------------------------
    // Insertion-ordered work-list of integer ids in [0, max_sz) with
    // O(1) duplicate suppression via a bitmap: an id may be queued at
    // most once; pushing an already-queued id is a no-op.
    //------------------------------------------------------------------
    gm_order(int _max_sz, int _max_thread = 16) :
            max_thread(_max_thread), max_sz(_max_sz) {
        local_Q_front = new std::list<T>[max_thread];
        local_Q_back = new std::list<T>[max_thread];
        bitmap = new unsigned char[(max_sz + 7) / 8];
        for (int i = 0; i < (max_sz + 7) / 8; i++)
            bitmap[i] = 0;
    }
    virtual ~gm_order() {
        delete[] local_Q_front;
        delete[] local_Q_back;
        delete[] bitmap;
    }
    //------------------------------------------------------------
    // API
    //   push_back/front, pop_back/front, clear, get_size
    //   push has separate parallel interface
    //------------------------------------------------------------
    void push_back(T e) // sequential
    {
        if (!_gm_get_bit(bitmap, e)) {
            _gm_set_bit(bitmap, e);
            Q.push_back(e);
        }
    }
    void push_front(T e) {
        if (!_gm_get_bit(bitmap, e)) {
            _gm_set_bit(bitmap, e);
            Q.push_front(e);
        }
    }
    // pop_* assume a non-empty queue (same contract as std::list).
    T pop_back() {
        T e = Q.back();
        _gm_clear_bit(bitmap, e);
        Q.pop_back();
        return e;
    }
    T pop_front() {
        T e = Q.front();
        _gm_clear_bit(bitmap, e);
        Q.pop_front();
        return e;
    }
    void clear() {
        Q.clear();
#pragma omp parallel for
        for (int i = 0; i < (max_sz + 7) / 8; i++)
            bitmap[i] = 0;
    }
    size_t get_size() {
        return Q.size();
    }
    bool is_in(T e) {
        return (_gm_get_bit(bitmap, e) == 1);
    }
    // Parallel pushes: each thread stages into its own local list;
    // merge() later splices the local lists into the main queue.
    void push_back_par(T e, int tid) {
        if (!_gm_get_bit(bitmap, e)) { // test and atomic
            if (_gm_set_bit_atomic(bitmap, e)) {
                local_Q_back[tid].push_back(e);
            }
        }
    }
    void push_front_par(T e, int tid) {
        if (!_gm_get_bit(bitmap, e)) { // test and atomic
            if (_gm_set_bit_atomic(bitmap, e)) {
                // [FIX] was local_Q_back: front-pushes must be staged in
                // the front lists, which merge() splices to Q.begin().
                local_Q_front[tid].push_front(e);
            }
        }
    }
    //-------------------------------------------
    // called when parallel addition is finished
    //-------------------------------------------
    void merge() {
        for (int i = 0; i < max_thread; i++) {
            if (local_Q_front[i].size() > 0) Q.splice(Q.begin(), local_Q_front[i]);
            if (local_Q_back[i].size() > 0) Q.splice(Q.end(), local_Q_back[i]);
        }
    }
    // for sequential iteration
    std::list<T>& get_list() {
        return Q;
    }
    //-----------------------------------------------
    // for iteration
    //-----------------------------------------------
    // todo, correctly use nested template def
#define ITERATOR_CLASS(CLASS_NAME, LIST_ITER_TYPE) \
class CLASS_NAME {\
public: \
    CLASS_NAME(typename LIST_ITER_TYPE I, typename LIST_ITER_TYPE E) \
    : ITER(I), END_ITER(E) {} \
    inline bool has_next() { \
        return (ITER != END_ITER); \
    } \
    inline T get_next() \
    { T t = *ITER; ITER++; return t;} \
private: \
    typename LIST_ITER_TYPE ITER; \
    typename LIST_ITER_TYPE END_ITER; \
};
    ITERATOR_CLASS(seq_iter, std::list<T>::iterator)
    ITERATOR_CLASS(rev_iter, std::list<T>::reverse_iterator)
#undef ITERATOR_CLASS
    // Iterator for parallel consumption.  Small queues are walked over
    // the list by thread 0 only; large queues are partitioned by id
    // range and scanned directly against the bitmap.
    class par_iter
    {
    public:
        // small-instance form: iterate a list range
        par_iter(typename std::list<T>::iterator I, typename std::list<T>::iterator E) :
                is_small(true), bitmap(NULL), ITER(I), END_ITER(E), IDX(), END_IDX() {
        }
        // large-instance form: scan ids [I, E) against the bitmap
        // [FIX] was initializing ITER/END_ITER from the T ids and left
        // IDX/END_IDX uninitialized.
        par_iter(unsigned char* B, T I, T E) :
                is_small(false), bitmap(B), IDX(I), END_IDX(E) {
        }
        inline bool has_next() {
            if (is_small)
                return (ITER != END_ITER);
            else {
                // NOTE(review): polarity looks inverted relative to
                // is_in() (_gm_get_bit == 1 means queued); confirm the
                // contract of _gm_check_bit before relying on this path.
                while (IDX < END_IDX) {
                    if (_gm_check_bit(bitmap, IDX) == 0) return true;
                    IDX++;
                }
                return false;
            }
        }
        inline T get_next() {
            if (is_small) {
                T t = *ITER;
                ITER++;
                return t;
            } else {
                return IDX++;
            }
        }
    private:
        bool is_small;
        unsigned char* bitmap;
        // [FIX] were declared std::set<T>::iterator, which does not
        // match the std::list iterators the constructor receives.
        typename std::list<T>::iterator ITER;     // for small instance use
        typename std::list<T>::iterator END_ITER; // for small instance use
        T IDX;      // next id to test (large instance)
        T END_IDX;  // one-past-last id (large instance)
    };
    seq_iter prepare_seq_iteration() {
        seq_iter I(Q.begin(), Q.end());
        return I;
    }
    rev_iter prepare_rev_iteration() {
        rev_iter I(Q.rbegin(), Q.rend());
        return I;
    }
    // Partition iteration among max_threads threads: a small queue is
    // handed entirely to thread 0 (others get empty ranges); a large
    // queue is split into contiguous id ranges over the bitmap.
    par_iter prepare_par_iteration(int thread_id, int max_threads) {
        bool is_small = (Q.size() < THRESHOLD_LARGE);
        if (is_small) {
            // for small instance, use single thread
            if (thread_id == 0) {
                par_iter I(Q.begin(), Q.end());
                return I;
            } else {
                par_iter I(Q.end(), Q.end());
                return I;
            }
        } else {
            size_t cnt = max_sz / max_threads;
            T begin = cnt * thread_id;
            T end = (thread_id == (max_threads - 1)) ? max_sz : begin + cnt;
            par_iter I(bitmap, begin, end);
            return I;
        }
    }
private:
    // initialize without size is prohibited
    // (init-list reordered to match declaration order)
    gm_order() : local_Q_front(NULL), local_Q_back(NULL), max_thread(-1), max_sz(-1), bitmap(NULL) {
    }
    std::list<T> Q;              // main insertion-ordered queue
    std::list<T>* local_Q_front; // per-thread staging for push_front_par
    std::list<T>* local_Q_back;  // per-thread staging for push_back_par
    int max_thread;
    int max_sz;                  // ids must lie in [0, max_sz)
    unsigned char* bitmap;       // one membership bit per id
    static const int THRESHOLD_LARGE = 4096;
};
typedef gm_order<node_t> gm_node_order;
typedef gm_order<edge_t> gm_edge_order;
#endif
|
GB_unaryop__ainv_fp64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_bool
// op(A') function: GB_tran__ainv_fp64_bool
// C type: double
// A type: bool
// cast: double cij = (double) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__ainv_fp64_bool: Cx [p] = -((double) Ax [p]) for all anz
// entries, in parallel.  Returns GrB_NO_VALUE when the operator is
// compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_fp64_bool
(
double *restrict Cx,        // output array, double
const bool *restrict Ax,    // input array, bool
int64_t anz,                // number of entries in Ax and Cx
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = op (cast (Ax [p])) via GB_CASTING then GB_OP
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__ainv_fp64_bool: C = op (cast (A')) — transpose A, typecast
// bool to double, and apply the additive-inverse operator.  The loop
// body is supplied by the included GB_unaryop_transpose.c template
// (phase 2 of 2).
GrB_Info GB_tran__ainv_fp64_bool
(
GrB_Matrix C,                       // output matrix
const GrB_Matrix A,                 // input matrix
int64_t **Rowcounts,                // row counts from phase 1
GBI_single_iterator Iter,           // iterator over A
const int64_t *restrict A_slice,    // partition of A across tasks
int naslice                         // number of slices
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target-30.c | extern void abort (void);
#pragma omp declare target
int v = 6;
#pragma omp end declare target
// OpenMP offloading testcase for the data-mapping rules of a
// 'declare target' variable v: a plain target region maps v
// tofrom, so the device increment is visible after 'target update
// from'; inside a parallel region with private(v), the nested target
// region gets a firstprivate copy, so the increment must NOT be
// visible — v stays 7 both times, else abort().
int
main ()
{
#pragma omp target /* predetermined map(tofrom: v) */
v++;
#pragma omp target update from (v)
if (v != 7)
abort ();
#pragma omp parallel private (v) num_threads (1)
{
#pragma omp target /* predetermined firstprivate(v) */
v++;
}
#pragma omp target update from (v)
if (v != 7)
abort ();
return 0;
}
|
elu_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "elu_kernel_arm.h"
#include "neon_mathfun.h"
#include <math.h>
#include <arm_neon.h>
/* Apply ELU to one slice of elem_num floats:
 *   out = x                    for x >  0
 *   out = alpha * (e^x - 1)    for x <= 0
 * data points to an int holding elem_num; id selects the slice.
 * The i parameter is unused (kept for the worker-pool signature).
 * Fix: the scalar tail now uses expf() so its precision matches the
 * single-precision exp_ps() used by the NEON path (previously the
 * tail used double exp(), giving position-dependent results); the
 * loop indices no longer shadow the i parameter. */
static void elu_kernel(int i, int id, void* data, const float* input, float* output, float alpha)
{
    (void)i;
    int elem_num = (( int* )data)[0];
    float32x4_t _one = vdupq_n_f32(1.f);
    float32x4_t _zero = vdupq_n_f32(0.f);
    float32x4_t _alpha = vdupq_n_f32(alpha);
    const float* cur_input = input + id * elem_num;
    float* cur_output = output + id * elem_num;
    /* vectorized main loop: 4 floats per iteration */
    for (int idx = 0; idx < (elem_num & -4); idx += 4)
    {
        float32x4_t _p = vld1q_f32(cur_input);
        uint32x4_t _lemask = vcleq_f32(_p, _zero); /* lanes with x <= 0 */
        float32x4_t _nps = exp_ps(_p);
        _nps = vsubq_f32(_nps, _one);
        _nps = vmulq_f32(_nps, _alpha);
        _p = vbslq_f32(_lemask, _nps, _p); /* per-lane branch select */
        vst1q_f32(cur_output, _p);
        cur_input += 4;
        cur_output += 4;
    }
    /* scalar tail (x == 0 yields 0 on either branch, so < vs <= is
     * equivalent to the vector path's compare) */
    for (int idx = elem_num & ~3; idx < elem_num; idx++)
    {
        if (*cur_input < 0.f)
            *cur_output = (expf(*cur_input) - 1.f) * alpha;
        else
            *cur_output = *cur_input;
        cur_input++;
        cur_output++;
    }
}
/* Apply ELU to the whole tensor, one dims[2]*dims[3] slice per
 * dims[0]*dims[1] channel, channels processed in parallel across
 * num_thread OpenMP threads.  Always returns 0. */
int elu_run(struct tensor* output_tensor, struct tensor* input_tensor, struct elu_param* elu_param,
            int num_thread)
{
    float* in_base = ( float* )input_tensor->data;
    float* out_base = ( float* )output_tensor->data;
    float alpha = elu_param->alpha;
    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];
#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < chan_num; c++)
        elu_kernel(0, 0, &chan_size, in_base + c * chan_size, out_base + c * chan_size, alpha);
    return 0;
}
|
residual_mex.c | #include <inttypes.h>
#include <omp.h>
#include "mex.h"
#include "residual_mex.h"
void residualf(float *r,
const float *f, const float *x, const uint8_t *G,
const double *h, const size_t *sz);
void residuald(double *r,
const double *f, const double *x, const uint8_t *G,
const double *h, const size_t *sz);
#ifdef RESIDUAL_MEX
// MEX gateway: residual_mex(r, f, x, G, h)
// Computes the residual in place into r (prhs[0]), dispatching on the
// precision of r; f and x must match.  G (prhs[3]) is the uint8
// interior mask, h (prhs[4]) the grid spacings, and the grid size is
// taken from the dimensions of r.  With one output, returns scalar 1.0.
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs != 5) || (nlhs > 1)) {
mexErrMsgTxt("Usage: residual_mex(r, f, x, G, h);");
}
const uint8_t *G = (const uint8_t *)mxGetData(prhs[3]);
const double *h = (const double *)mxGetData(prhs[4]);
const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]);
if (mxIsSingle(prhs[0])) {
// single-precision path
float *r = (float *)mxGetData(prhs[0]);
const float *f = (const float *)mxGetData(prhs[1]);
const float *x = (const float *)mxGetData(prhs[2]);
residualf(r, f, x, G, h, sz);
} else {
// double-precision path
double *r = (double *)mxGetData(prhs[0]);
const double *f = (const double *)mxGetData(prhs[1]);
const double *x = (const double *)mxGetData(prhs[2]);
residuald(r, f, x, G, h, sz);
}
if (nlhs == 1) {
plhs[0] = mxCreateDoubleScalar(1.0);
}
return;
}
#endif
/* In-process entry point: compute the residual into mxr, dispatching
 * on the precision of mxr.  Mask mxG (uint8), spacings mxh (double),
 * and the grid dimensions are taken from mxf. */
void
mx_residual(mxArray *mxr,
            const mxArray *mxf, const mxArray *mxx, const mxArray *mxG,
            const mxArray *mxh)
{
    const uint8_t *mask = (const uint8_t *)mxGetData(mxG);
    const double *spacing = (const double *)mxGetData(mxh);
    const size_t *dims = (const size_t *)mxGetDimensions(mxf);

    if (!mxIsSingle(mxr)) {
        /* double-precision path */
        residuald((double *)mxGetData(mxr),
                  (const double *)mxGetData(mxf),
                  (const double *)mxGetData(mxx),
                  mask, spacing, dims);
        return;
    }
    /* single-precision path */
    residualf((float *)mxGetData(mxr),
              (const float *)mxGetData(mxf),
              (const float *)mxGetData(mxx),
              mask, spacing, dims);
}
/* Single-precision residual r[l] = f[l] + A*x at every interior grid
 * point l where the mask G[l] is nonzero, with A the 7-point Laplacian
 * stencil for spacings h[0..2] on an sz[0] x sz[1] x sz[2] grid
 * (column-major, x fastest).  Boundary points are left untouched. */
void
residualf(float *r,
          const float *f, const float *x, const uint8_t *G,
          const double *h, const size_t *sz)
{
    size_t i, j, k, l;
    const size_t nx = sz[0];
    const size_t ny = sz[1];
    const size_t nz = sz[2];
    const size_t plane = nx * ny;
    /* one-past-last interior offsets along each axis */
    const size_t iend = nx - 1;
    const size_t jend = nx * (ny - 1);
    const size_t kend = plane * (nz - 1);
    /* stencil coefficients: 1/h^2 per axis, -2*sum on the center */
    const float cx = (float)(1.0 / (h[0] * h[0]));
    const float cy = (float)(1.0 / (h[1] * h[1]));
    const float cz = (float)(1.0 / (h[2] * h[2]));
    const float cc = (float)(-2.0 * (cx + cy + cz));
#pragma omp parallel for private(i,j,k,l) schedule(static) \
        if (plane*nz > 32*32*32)
    for (k = plane; k < kend; k += plane) {
        for (j = nx; j < jend; j += nx) {
            l = 1 + j + k;
            for (i = 1; i < iend; ++i, ++l) {
                if (!G[l])
                    continue;
                r[l] = f[l] +
                    (cc * x[l] +
                     cx * (x[l - 1] + x[l + 1]) +
                     cy * (x[l - nx] + x[l + nx]) +
                     cz * (x[l - plane] + x[l + plane]));
            }
        }
    }
    return;
}
/* Double-precision residual r[l] = f[l] + A*x at every interior grid
 * point l where the mask G[l] is nonzero, with A the 7-point Laplacian
 * stencil for spacings h[0..2] on an sz[0] x sz[1] x sz[2] grid
 * (column-major, x fastest).  Boundary points are left untouched. */
void
residuald(double *r,
          const double *f, const double *x, const uint8_t *G,
          const double *h, const size_t *sz)
{
    size_t i, j, k, l;
    const size_t nx = sz[0];
    const size_t ny = sz[1];
    const size_t nz = sz[2];
    const size_t plane = nx * ny;
    /* one-past-last interior offsets along each axis */
    const size_t iend = nx - 1;
    const size_t jend = nx * (ny - 1);
    const size_t kend = plane * (nz - 1);
    /* stencil coefficients: 1/h^2 per axis, -2*sum on the center */
    const double cx = 1.0 / (h[0] * h[0]);
    const double cy = 1.0 / (h[1] * h[1]);
    const double cz = 1.0 / (h[2] * h[2]);
    const double cc = -2.0 * (cx + cy + cz);
#pragma omp parallel for private(i,j,k,l) schedule(static) \
        if (plane*nz > 32*32*32)
    for (k = plane; k < kend; k += plane) {
        for (j = nx; j < jend; j += nx) {
            l = 1 + j + k;
            for (i = 1; i < iend; ++i, ++l) {
                if (!G[l])
                    continue;
                r[l] = f[l] +
                    (cc * x[l] +
                     cx * (x[l - 1] + x[l + 1]) +
                     cy * (x[l - nx] + x[l + nx]) +
                     cz * (x[l - plane] + x[l + plane]));
            }
        }
    }
    return;
}
|
2mm.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* 2mm.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "2mm.h"
/* Array initialization. */
/* Array initialization: set alpha/beta and fill A, B, C, D with
 * deterministic index-derived values in [0,1) so runs are
 * reproducible across platforms. */
static
void init_array(int ni, int nj, int nk, int nl,
DATA_TYPE *alpha,
DATA_TYPE *beta,
DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk),
DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj),
DATA_TYPE POLYBENCH_2D(C, NJ, NL, nj, nl),
DATA_TYPE POLYBENCH_2D(D, NI, NL, ni, nl))
{
int i, j;
*alpha = 1.5;
*beta = 1.2;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = (DATA_TYPE) ((i * j + 1) % ni) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = (DATA_TYPE) (i * (j + 1) % nj) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nl; j++)
C[i][j] = (DATA_TYPE) ((i * (j + 3) + 1) % nl) / nl;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++)
D[i][j] = (DATA_TYPE) (i * (j + 2) % nk) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.
Dumps D to POLYBENCH_DUMP_TARGET, one newline every 20 values. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(D, NI, NL, ni, nl))
{
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("D");
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++)
{
if ((i * ni + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, D[i][j]);
}
POLYBENCH_DUMP_END("D");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: D := tmp*C + beta*D with
 * tmp := alpha*A*B, each i-loop parallelized with OpenMP.  The
 * firstprivate pointers (A, B, tmp, C) are read-only in their region;
 * the whole function is timed, including call and return. */
static
void kernel_2mm(int ni, int nj, int nk, int nl,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(tmp, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk),
DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj),
DATA_TYPE POLYBENCH_2D(C, NJ, NL, nj, nl),
DATA_TYPE POLYBENCH_2D(D, NI, NL, ni, nl))
{
int i, j, k;
/* tmp := alpha * A * B */
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nj, nk, alpha, A, B)
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NJ; j++)
{
tmp[i][j] = SCALAR_VAL(0.0);
for (k = 0; k < _PB_NK; ++k)
tmp[i][j] += alpha * A[i][k] * B[k][j];
}
}
/* D := beta * D + tmp * C */
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nl, beta, nj, tmp, C)
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NL; j++)
{
D[i][j] *= beta;
for (k = 0; k < _PB_NJ; ++k)
D[i][j] += tmp[i][k] * C[k][j];
}
}
}
/* Driver: allocate, initialize, time the 2mm kernel, optionally dump
 * the live-out array D, then free everything. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
/* Variable declaration/allocation. */
DATA_TYPE alpha;
DATA_TYPE beta;
POLYBENCH_2D_ARRAY_DECL(tmp, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, &alpha, &beta,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_2mm (ni, nj, nk, nl,
alpha, beta,
POLYBENCH_ARRAY(tmp),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(D)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(tmp);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
return 0;
}
|
world.c | /*
* world.c
*
* @author: phdenzel
*
* DYDAMA universe properties
*
*/
#include "world.h"
#include "sort.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
void init_particleProperties(particle_t *p, double m, double size, char* type) {
// initialize particle properties
p->m = m;
p->size = size;
p->type = type;
}
void set_particleRVA(particle_t *p, int dimension,
double x, double v, double a) {
// initialize particle position/velocity/acceleration in dimension
vector4_setIndex(&p->r, dimension, x);
vector4_setIndex(&p->v, dimension, v);
vector4_setIndex(&p->a, dimension, a);
}
void set_particleR(particle_t *p, double x, double y, double z) {
// set the particles position r
vector4_set(&p->r, 0, x, y, z);
}
void set_particleV(particle_t *p, double vx, double vy, double vz) {
// set the particles velocity v
vector4_set(&p->v, 0, vx, vy, vz);
}
void set_particleA(particle_t *p, double ax, double ay, double az) {
// set the particles acceleration a
vector4_set(&p->a, 0, ax, ay, az);
}
void set_totalMass(universe *u) {
// set the total mass parameter of u
assert(u->N > 0 || u->particles != NULL);
double totalMass = 0;
for (int i = 0; i < u->N; i++) {
totalMass += u->particles[i].m;
}
u->Mtot = totalMass;
}
void set_projection(universe *u,
double xmin, double xmax, double ymin, double ymax) {
// set the projection parameters of u
u->proj[0] = xmin;
u->proj[1] = xmax;
u->proj[2] = ymin;
u->proj[3] = ymax;
}
inline int sgn(double val) {
return (val > 0) - (val < 0);
}
inline void gForceUpdate(universe *u) {
// calculate gravitational force and update particle accelerations
double G = u->G;
switch (u->D) {
case 1: { // 1D gravity
int this, other;
double a, this_x;
#pragma omp simd
for (this = 0; this < u->N; this++) {
u->particles[this].a.x = 0;
}
#pragma omp simd
for (this = 0; this < u->N-1; this++) {
this_x = u->particles[this].r.x;
for (other = this+1; other < u->N; other++) {
// copysign seems to be faster than the sgn function
a = G * copysign(1, u->particles[other].r.x - this_x);
u->particles[this].a.x += a*u->particles[other].m;
u->particles[other].a.x -= a*u->particles[this].m;
}
}
} break;
case 2: { // 2D gravity
printf("2D gravity not yet implemented!\n");
} break;
case 3: { // 3D gravity
int this, other;
// reset forces
for (this = 0; this < u->N; this++) {
vector4_set(&u->particles[this].a, 0, 0, 0, 0);
}
double n;
vector4_t r;
#pragma omp simd
for (this = 0; this < u->N-1; this++) {
for (other = this+1; other < u->N; other++) {
r = vector4_subtractcpy(&u->particles[this].r, &u->particles[other].r);
n = 1./vector4_length(&r);
n = n*n*n;
n *= u->G;
vector4_scale(&r, n*u->particles[other].m);
vector4_add(&u->particles[this].a, &r);
vector4_scale(&r, -1*u->particles[this].m/u->particles[other].m);
vector4_add(&u->particles[other].a, &r);
}
}
} break;
}
}
inline void gForceFast(universe *u) {
// sort the particles first and then calculate accelerations
// only possible if particle masses are all equal
double G = u->G;
switch (u->D) {
case 1: { // 1D gravity
int i;
double Meff = u->Mtot;
xQuickSort(u, 0, u->N-1);
#pragma omp simd
for (i = 0; i < u->N/2; i++) {
Meff -= u->particles[i].m;
u->particles[i].a.x = G*Meff;
u->particles[u->N-i-1].a.x = -G*Meff;
Meff -= u->particles[i].m;
}
} break;
case 2: { // 2D gravity
printf("2D gravity not yet implemented!\n");
} break;
case 3: { // 3D gravity
printf("3D gravity not yet implemented!\n");
} break;
}
}
void drift(particle_t *p, double dt, unsigned char dimensions) {
// perform a drift step for particle p of amount dt
switch (dimensions) {
case 1: { // 1D x-coordinate drift
p->r.x += p->v.x * dt;
} break;
case 2: { // 2D x and y-coordinate drift
p->r.x += p->v.x * dt;
p->r.y += p->v.y * dt;
//vector4_t vdt = vector4_scalecpy(&p->v, dt);
//p->r = vector4_addcpy(&p->r, &vdt);
} break;
case 3: { // full 3D drift
vector4_t vdt = vector4_scalecpy(&p->v, dt);
vector4_add(&p->r, &vdt);
} break;
}
}
void kick(particle_t *p, double dt, unsigned char dimensions) {
// perform a drift step for particle p of amount dt
switch (dimensions) {
case 1: { // 1D x-coordinate drift
p->v.x += p->a.x * dt;
} break;
case 2: { // 2D x and y-coordinate drift
p->v.x += p->a.x * dt;
p->v.y += p->a.y * dt;
//vector4_t adt = vector4_scalecpy(&p->a, dt);
//p->v = vector4_addcpy(&p->v, &adt);
} break;
case 3: { // full 3D drift
vector4_t adt = vector4_scalecpy(&p->a, dt);
vector4_add(&p->v, &adt);
} break;
}
}
void drift_halfStep(universe *u) {
// perform a half drift step for all particles in universe u
for (int i = 0; i < u->N; i++) {
drift(&u->particles[i], u->dt/2, u->D);
}
}
void kick_fullStep(universe *u) {
// perform a full kick step of all particles in universe u
for (int i = 0; i < u->N; i++) {
kick(&u->particles[i], u->dt, u->D);
}
}
inline void evolve(universe *u) {
// perform a leap frog step for all particles in universe u
drift_halfStep(u);
gForceUpdate(u);
kick_fullStep(u);
drift_halfStep(u);
}
inline void evolveFast(universe *u) {
// perform a leap frog step with improved force calculation
drift_halfStep(u);
gForceFast(u);
kick_fullStep(u);
drift_halfStep(u);
}
|
GraphReconstructor.h | //
// Copyright (C) 2015-2020 Yahoo Japan Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#pragma once
#include <unordered_map>
#include <unordered_set>
#include <list>
#include "defines.h"
#ifdef _OPENMP
#include <omp.h>
#else
#warning "*** OMP is *NOT* available! ***"
#endif
namespace NGT {
class GraphReconstructor {
public:
static void extractGraph(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &graphIndex) {
graph.reserve(graphIndex.repository.size());
for (size_t id = 1; id < graphIndex.repository.size(); id++) {
if (id % 1000000 == 0) {
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor::extractGraph: Processed " + std::to_string(id) + " objects.");
// std::cerr << "GraphReconstructor::extractGraph: Processed " << id << " objects." << std::endl;
}
try {
NGT::GraphNode &node = *graphIndex.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
NGT::ObjectDistances nd;
nd.reserve(node.size());
for (auto n = node.begin(graphIndex.repository.allocator); n != node.end(graphIndex.repository.allocator); ++n) {
nd.push_back(ObjectDistance((*n).id, (*n).distance));
}
graph.push_back(nd);
#else
graph.push_back(node);
#endif
if (graph.back().size() != graph.back().capacity()) {
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor::extractGraph: Warning! The graph size must be the same as the capacity. " + std::to_string(id));
// std::cerr << "GraphReconstructor::extractGraph: Warning! The graph size must be the same as the capacity. " << id << std::endl;
}
} catch(NGT::Exception &err) {
graph.push_back(NGT::ObjectDistances());
continue;
}
}
}
static void
adjustPaths(NGT::Index &outIndex)
{
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("construct index is not implemented.");
// std::cerr << "construct index is not implemented." << std::endl;
exit(1);
#else
NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex());
size_t rStartRank = 0;
std::list<std::pair<size_t, NGT::GraphNode> > tmpGraph;
for (size_t id = 1; id < outGraph.repository.size(); id++) {
NGT::GraphNode &node = *outGraph.getNode(id);
tmpGraph.push_back(std::pair<size_t, NGT::GraphNode>(id, node));
if (node.size() > rStartRank) {
node.resize(rStartRank);
}
}
size_t removeCount = 0;
for (size_t rank = rStartRank; ; rank++) {
bool edge = false;
Timer timer;
for (auto it = tmpGraph.begin(); it != tmpGraph.end();) {
size_t id = (*it).first;
try {
NGT::GraphNode &node = (*it).second;
if (rank >= node.size()) {
it = tmpGraph.erase(it);
continue;
}
edge = true;
if (rank >= 1 && node[rank - 1].distance > node[rank].distance) {
// std::cerr << "distance order is wrong!" << std::endl;
// std::cerr << id << ":" << rank << ":" << node[rank - 1].id << ":" << node[rank].id << std::endl;
if (NGT_LOG_DEBUG_) {
(*NGT_LOG_DEBUG_)("distance order is wrong!");
(*NGT_LOG_DEBUG_)(std::to_string(id) + ":" + std::to_string(rank) + ":" + std::to_string(node[rank - 1].id) + ":" + std::to_string(node[rank].id));
}
}
NGT::GraphNode &tn = *outGraph.getNode(id);
volatile bool found = false;
if (rank < 1000) {
for (size_t tni = 0; tni < tn.size() && !found; tni++) {
if (tn[tni].id == node[rank].id) {
continue;
}
NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id);
for (size_t dni = 0; dni < dstNode.size(); dni++) {
if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) {
found = true;
break;
}
}
}
} else {
#ifdef _OPENMP
#pragma omp parallel for num_threads(10)
#endif
for (size_t tni = 0; tni < tn.size(); tni++) {
if (found) {
continue;
}
if (tn[tni].id == node[rank].id) {
continue;
}
NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id);
for (size_t dni = 0; dni < dstNode.size(); dni++) {
if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) {
found = true;
}
}
}
}
if (!found) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
outGraph.addEdge(id, node.at(i, outGraph.repository.allocator).id,
node.at(i, outGraph.repository.allocator).distance, true);
#else
tn.push_back(NGT::ObjectDistance(node[rank].id, node[rank].distance));
#endif
} else {
removeCount++;
}
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
it++;
continue;
}
it++;
}
if (edge == false) {
break;
}
}
#endif // NGT_SHARED_MEMORY_ALLOCATOR
}
static void
adjustPathsEffectively(NGT::Index &outIndex)
{
NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex());
adjustPathsEffectively(outGraph);
}
static bool edgeComp(NGT::ObjectDistance a, NGT::ObjectDistance b) {
return a.id < b.id;
}
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
static void insert(NGT::GraphNode &node, size_t edgeID, NGT::Distance edgeDistance, NGT::GraphIndex &graph) {
NGT::ObjectDistance edge(edgeID, edgeDistance);
GraphNode::iterator ni = std::lower_bound(node.begin(graph.repository.allocator), node.end(graph.repository.allocator), edge, edgeComp);
node.insert(ni, edge, graph.repository.allocator);
}
static bool hasEdge(NGT::GraphIndex &graph, size_t srcNodeID, size_t dstNodeID)
{
NGT::GraphNode &srcNode = *graph.getNode(srcNodeID);
GraphNode::iterator ni = std::lower_bound(srcNode.begin(graph.repository.allocator), srcNode.end(graph.repository.allocator), ObjectDistance(dstNodeID, 0.0), edgeComp);
return (ni != srcNode.end(graph.repository.allocator)) && ((*ni).id == dstNodeID);
}
#else
static void insert(NGT::GraphNode &node, size_t edgeID, NGT::Distance edgeDistance) {
NGT::ObjectDistance edge(edgeID, edgeDistance);
GraphNode::iterator ni = std::lower_bound(node.begin(), node.end(), edge, edgeComp);
node.insert(ni, edge);
}
static bool hasEdge(NGT::GraphIndex &graph, size_t srcNodeID, size_t dstNodeID)
{
NGT::GraphNode &srcNode = *graph.getNode(srcNodeID);
GraphNode::iterator ni = std::lower_bound(srcNode.begin(), srcNode.end(), ObjectDistance(dstNodeID, 0.0), edgeComp);
return (ni != srcNode.end()) && ((*ni).id == dstNodeID);
}
#endif
static void
adjustPathsEffectively(NGT::GraphIndex &outGraph)
{
Timer timer;
timer.start();
std::vector<NGT::GraphNode> tmpGraph;
for (size_t id = 1; id < outGraph.repository.size(); id++) {
try {
NGT::GraphNode &node = *outGraph.getNode(id);
tmpGraph.push_back(node);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
node.clear(outGraph.repository.allocator);
#else
node.clear();
#endif
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
tmpGraph.push_back(NGT::GraphNode(outGraph.repository.allocator));
#else
tmpGraph.push_back(NGT::GraphNode());
#endif
}
}
if (outGraph.repository.size() != tmpGraph.size() + 1) {
std::stringstream msg;
msg << "GraphReconstructor: Fatal inner error. " << outGraph.repository.size() << ":" << tmpGraph.size();
NGTThrowException(msg);
}
timer.stop();
// std::cerr << "GraphReconstructor::adjustPaths: graph preparing time=" << timer << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor::adjustPaths: graph preparing time=" + std::to_string(timer.time));
timer.reset();
timer.start();
std::vector<std::vector<std::pair<uint32_t, uint32_t> > > removeCandidates(tmpGraph.size());
int removeCandidateCount = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (size_t idx = 0; idx < tmpGraph.size(); ++idx) {
auto it = tmpGraph.begin() + idx;
size_t id = idx + 1;
try {
NGT::GraphNode &srcNode = *it;
std::unordered_map<uint32_t, std::pair<size_t, double> > neighbors;
for (size_t sni = 0; sni < srcNode.size(); ++sni) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
neighbors[srcNode.at(sni, outGraph.repository.allocator).id] = std::pair<size_t, double>(sni, srcNode.at(sni, outGraph.repository.allocator).distance);
#else
neighbors[srcNode[sni].id] = std::pair<size_t, double>(sni, srcNode[sni].distance);
#endif
}
std::vector<std::pair<int, std::pair<uint32_t, uint32_t> > > candidates;
for (size_t sni = 0; sni < srcNode.size(); sni++) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
NGT::GraphNode &pathNode = tmpGraph[srcNode.at(sni, outGraph.repository.allocator).id - 1];
#else
NGT::GraphNode &pathNode = tmpGraph[srcNode[sni].id - 1];
#endif
for (size_t pni = 0; pni < pathNode.size(); pni++) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
auto dstNodeID = pathNode.at(pni, outGraph.repository.allocator).id;
#else
auto dstNodeID = pathNode[pni].id;
#endif
auto dstNode = neighbors.find(dstNodeID);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
if (dstNode != neighbors.end()
&& srcNode.at(sni, outGraph.repository.allocator).distance < (*dstNode).second.second
&& pathNode.at(pni, outGraph.repository.allocator).distance < (*dstNode).second.second
) {
#else
if (dstNode != neighbors.end()
&& srcNode[sni].distance < (*dstNode).second.second
&& pathNode[pni].distance < (*dstNode).second.second
) {
#endif
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode.at(sni, outGraph.repository.allocator).id, dstNodeID)));
#else
candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode[sni].id, dstNodeID)));
#endif
removeCandidateCount++;
}
}
}
sort(candidates.begin(), candidates.end(), std::greater<std::pair<int, std::pair<uint32_t, uint32_t>>>());
removeCandidates[id - 1].reserve(candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
removeCandidates[id - 1].push_back(candidates[i].second);
}
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
timer.stop();
// std::cerr << "GraphReconstructor::adjustPaths: extracting removed edge candidates time=" << timer << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor::adjustPaths: extracting removed edge candidates time=" + std::to_string(timer.time));
timer.reset();
timer.start();
std::list<size_t> ids;
for (size_t idx = 0; idx < tmpGraph.size(); ++idx) {
ids.push_back(idx + 1);
}
int removeCount = 0;
removeCandidateCount = 0;
for (size_t rank = 0; ids.size() != 0; rank++) {
for (auto it = ids.begin(); it != ids.end(); ) {
size_t id = *it;
size_t idx = id - 1;
try {
NGT::GraphNode &srcNode = tmpGraph[idx];
if (rank >= srcNode.size()) {
if (!removeCandidates[idx].empty()) {
// std::cerr << "Something wrong! ID=" << id << " # of remaining candidates=" << removeCandidates[idx].size() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Something wrong! ID=" + std::to_string(id) + " # of remaining candidates=" + std::to_string(removeCandidates[idx].size()));
abort();
}
#if !defined(NGT_SHARED_MEMORY_ALLOCATOR)
NGT::GraphNode empty;
tmpGraph[idx] = empty;
#endif
it = ids.erase(it);
continue;
}
if (removeCandidates[idx].size() > 0) {
removeCandidateCount++;
bool pathExist = false;
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) {
#else
while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) {
#endif
size_t path = removeCandidates[idx].back().first;
size_t dst = removeCandidates[idx].back().second;
removeCandidates[idx].pop_back();
if (removeCandidates[idx].empty()) {
std::vector<std::pair<uint32_t, uint32_t>> empty;
removeCandidates[idx] = empty;
}
if ((hasEdge(outGraph, id, path)) && (hasEdge(outGraph, path, dst))) {
pathExist = true;
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) {
#else
while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) {
#endif
removeCandidates[idx].pop_back();
if (removeCandidates[idx].empty()) {
std::vector<std::pair<uint32_t, uint32_t>> empty;
removeCandidates[idx] = empty;
}
}
break;
}
}
if (pathExist) {
removeCount++;
it++;
continue;
}
}
NGT::GraphNode &outSrcNode = *outGraph.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
insert(outSrcNode, srcNode.at(rank, outGraph.repository.allocator).id, srcNode.at(rank, outGraph.repository.allocator).distance, outGraph);
#else
insert(outSrcNode, srcNode[rank].id, srcNode[rank].distance);
#endif
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
it++;
continue;
}
it++;
}
}
for (size_t id = 1; id < outGraph.repository.size(); id++) {
try {
NGT::GraphNode &node = *outGraph.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
std::sort(node.begin(outGraph.repository.allocator), node.end(outGraph.repository.allocator));
#else
std::sort(node.begin(), node.end());
#endif
} catch(...) {}
}
}
// Converts a directed kNN graph (adjacency lists indexed by node ID - 1,
// node IDs are 1-based) into an ANNG in place: for every edge (u -> v) the
// reverse edge (v -> u) is added, then each adjacency list is sorted and
// consecutive duplicate neighbor IDs are removed.
// Not available for the shared-memory build.
static
void convertToANNG(std::vector<NGT::ObjectDistances> &graph)
{
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("convertToANNG is not implemented for shared memory.");
// std::cerr << "convertToANNG is not implemented for shared memory." << std::endl;
return;
#else
// std::cerr << "convertToANNG begin" << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("convertToANNG begin");
// Pass 1: append the reverse of every edge.
// NOTE(review): this pushes into graph[...] while iterating over node; it is
// only safe if a node never lists itself as a neighbor (a self edge would
// invalidate the iterator) -- confirm the source graph has no self loops.
for (size_t idx = 0; idx < graph.size(); idx++) {
NGT::GraphNode &node = graph[idx];
for (auto ni = node.begin(); ni != node.end(); ++ni) {
graph[(*ni).id - 1].push_back(NGT::ObjectDistance(idx + 1, (*ni).distance));
}
}
// Pass 2: sort each adjacency list and drop duplicates (an edge present in
// both directions now appears twice with identical id and distance).
for (size_t idx = 0; idx < graph.size(); idx++) {
NGT::GraphNode &node = graph[idx];
if (node.size() == 0) {
continue;
}
std::sort(node.begin(), node.end());
NGT::ObjectID prev = 0;
for (auto it = node.begin(); it != node.end();) {
if (prev == (*it).id) {
it = node.erase(it);
continue;
}
prev = (*it).id;
it++;
}
// Copy-and-swap to release the excess capacity left behind by erase().
NGT::GraphNode tmp = node;
node.swap(tmp);
}
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("convertToANNG end");
// std::cerr << "convertToANNG end" << std::endl;
#endif
}
// Rebuilds outGraph as an ONNG from the source adjacency lists in graph:
//   1) each node keeps only the first originalEdgeSize source edges
//      (originalEdgeSize == 0 clears all edges),
//   2) up to reverseEdgeSize reverse edges are added per source node,
//   3) every adjacency list is sorted and deduplicated,
// and finally the graph type property is stamped as GraphTypeONNG.
static
void reconstructGraph(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &outGraph, size_t originalEdgeSize, size_t reverseEdgeSize)
{
// Sanity guard: an absurdly large reverse-edge count indicates a caller bug.
if (reverseEdgeSize > 10000) {
// std::cerr << "something wrong. Edge size=" << reverseEdgeSize << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("something wrong. Edge size=" + std::to_string(reverseEdgeSize));
exit(1);
}
NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer;
// Phase 1: truncate each node's edge list to originalEdgeSize.
originalEdgeTimer.start();
for (size_t id = 1; id < outGraph.repository.size(); id++) {
try {
NGT::GraphNode &node = *outGraph.getNode(id);
if (originalEdgeSize == 0) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
node.clear(outGraph.repository.allocator);
#else
NGT::GraphNode empty;
node.swap(empty);
#endif
} else {
// Work on a copy so the source graph stays intact.
NGT::ObjectDistances n = graph[id - 1];
if (n.size() < originalEdgeSize) {
// Too few source edges: leave this node's current edges untouched.
// std::cerr << "GraphReconstructor: Warning. The edges are too few. " << n.size() << ":" << originalEdgeSize << " for " << id << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. The edges are too few. " + std::to_string(n.size()) + ":" + std::to_string(originalEdgeSize) + " for " + std::to_string(id));
continue;
}
n.resize(originalEdgeSize);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
node.copy(n, outGraph.repository.allocator);
#else
node.swap(n);
#endif
}
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
originalEdgeTimer.stop();
// Phase 2: for each source node id, add the reverse edge (neighbor -> id)
// for its first reverseEdgeSize neighbors.
reverseEdgeTimer.start();
int insufficientNodeCount = 0;
for (size_t id = 1; id <= graph.size(); ++id) {
try {
NGT::ObjectDistances &node = graph[id - 1];
size_t rsize = reverseEdgeSize;
if (rsize > node.size()) {
// Node has fewer edges than requested; count it and use what exists.
insufficientNodeCount++;
rsize = node.size();
}
for (size_t i = 0; i < rsize; ++i) {
NGT::Distance distance = node[i].distance;
size_t nodeID = node[i].id;
try {
NGT::GraphNode &n = *outGraph.getNode(nodeID);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
n.push_back(NGT::ObjectDistance(id, distance), outGraph.repository.allocator);
#else
n.push_back(NGT::ObjectDistance(id, distance));
#endif
} catch(...) {}
}
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
reverseEdgeTimer.stop();
if (insufficientNodeCount != 0) {
// std::cerr << "# of the nodes edges of which are in short = " << insufficientNodeCount << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("# of the nodes edges of which are in short = " + std::to_string(insufficientNodeCount));
}
// Phase 3: sort every adjacency list and drop duplicate neighbor IDs
// (an edge can exist both as an original and as a reverse edge).
normalizeEdgeTimer.start();
for (size_t id = 1; id < outGraph.repository.size(); id++) {
try {
NGT::GraphNode &n = *outGraph.getNode(id);
if (id % 100000 == 0) {
// std::cerr << "Processed " << id << " nodes" << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Processed " + std::to_string(id) + " nodes");
}
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
std::sort(n.begin(outGraph.repository.allocator), n.end(outGraph.repository.allocator));
#else
std::sort(n.begin(), n.end());
#endif
NGT::ObjectID prev = 0;
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
for (auto it = n.begin(outGraph.repository.allocator); it != n.end(outGraph.repository.allocator);) {
#else
for (auto it = n.begin(); it != n.end();) {
#endif
if (prev == (*it).id) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
it = n.erase(it, outGraph.repository.allocator);
#else
it = n.erase(it);
#endif
continue;
}
prev = (*it).id;
it++;
}
#if !defined(NGT_SHARED_MEMORY_ALLOCATOR)
// Copy-and-swap to release excess capacity left behind by erase().
NGT::GraphNode tmp = n;
n.swap(tmp);
#endif
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
normalizeEdgeTimer.stop();
// std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time
// << ":" << normalizeEdgeTimer.time << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Reconstruction time=" + std::to_string(originalEdgeTimer.time) + ":" + std::to_string(reverseEdgeTimer.time)
+ ":" + std::to_string(normalizeEdgeTimer.time));
// Mark the rebuilt graph as an ONNG.
NGT::Property prop;
outGraph.getProperty().get(prop);
prop.graphType = NGT::NeighborhoodGraph::GraphTypeONNG;
outGraph.getProperty().set(prop);
}
// Rebuilds outGraph from the source adjacency lists under degree constraints:
// all existing edges are cleared, reverse edges are added starting from the
// nodes with the fewest incoming candidates (capping each node's in-degree at
// reverseEdgeSize), the lists are normalized (sorted, deduplicated), and
// finally up to originalEdgeSize forward edges are re-added per node.
// mode 'a' stops adding forward edges once a node reaches originalEdgeSize;
// mode 'c' adds them unconditionally.
static
void reconstructGraphWithConstraint(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &outGraph,
size_t originalEdgeSize, size_t reverseEdgeSize,
char mode = 'a')
{
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("reconstructGraphWithConstraint is not implemented.");
// std::cerr << "reconstructGraphWithConstraint is not implemented." << std::endl;
abort();
#else
NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer;
// Sanity guard: an absurdly large reverse-edge count indicates a caller bug.
if (reverseEdgeSize > 10000) {
// std::cerr << "something wrong. Edge size=" << reverseEdgeSize << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("something wrong. Edge size=" + std::to_string(reverseEdgeSize));
exit(1);
}
// Phase 0: clear all existing edges of the output graph.
for (size_t id = 1; id < outGraph.repository.size(); id++) {
if (id % 1000000 == 0) {
// std::cerr << "Processed " << id << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Processed " + std::to_string(id));
}
try {
NGT::GraphNode &node = *outGraph.getNode(id);
if (node.size() == 0) {
continue;
}
node.clear();
// Swap with an empty node to actually release the capacity.
NGT::GraphNode empty;
node.swap(empty);
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
NGT::GraphIndex::showStatisticsOfGraph(outGraph);
// Phase 1: collect all reverse edges per destination node.
// BUGFIX: reverseEdgeTimer.stop() below was previously called without a
// matching start(), so the reported reverse-edge time was garbage.
reverseEdgeTimer.start();
std::vector<ObjectDistances> reverse(graph.size() + 1);
for (size_t id = 1; id <= graph.size(); ++id) {
try {
NGT::GraphNode &node = graph[id - 1];
if (id % 100000 == 0) {
// std::cerr << "Processed (summing up) " << id << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Processed (summing up) " + std::to_string(id));
}
for (size_t rank = 0; rank < node.size(); rank++) {
reverse[node[rank].id].push_back(ObjectDistance(id, node[rank].distance));
}
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
// Order nodes by how many reverse-edge candidates they have, ascending, so
// sparsely referenced nodes get their edges first.
std::vector<std::pair<size_t, size_t> > reverseSize(graph.size() + 1);
reverseSize[0] = std::pair<size_t, size_t>(0, 0);
for (size_t rid = 1; rid <= graph.size(); ++rid) {
reverseSize[rid] = std::pair<size_t, size_t>(reverse[rid].size(), rid);
}
std::sort(reverseSize.begin(), reverseSize.end());
// BUGFIX: node IDs are 1-based up to graph.size(), so the in-degree counter
// needs graph.size() + 1 entries; it was one element short (out-of-bounds
// write for the highest ID).
std::vector<uint32_t> indegreeCount(graph.size() + 1, 0);
size_t zeroCount = 0;
// BUGFIX: the loop bound was 'sizerank <= reverseSize.size()', which read
// one element past the end of the vector on the final iteration.
for (size_t sizerank = 0; sizerank < reverseSize.size(); sizerank++) {
if (reverseSize[sizerank].first == 0) {
zeroCount++;
continue;
}
size_t rid = reverseSize[sizerank].second;
ObjectDistances &rnode = reverse[rid];
for (auto rni = rnode.begin(); rni != rnode.end(); ++rni) {
// Cap the in-degree of the source node at reverseEdgeSize.
if (indegreeCount[(*rni).id] >= reverseEdgeSize) {
continue;
}
NGT::GraphNode &node = *outGraph.getNode(rid);
// Once the source has any edge, also respect the out-degree budget here.
if (indegreeCount[(*rni).id] > 0 && node.size() >= originalEdgeSize) {
continue;
}
node.push_back(NGT::ObjectDistance((*rni).id, (*rni).distance));
indegreeCount[(*rni).id]++;
}
}
reverseEdgeTimer.stop();
// std::cerr << "The number of nodes with zero outdegree by reverse edges=" << zeroCount << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("The number of nodes with zero outdegree by reverse edges=" + std::to_string(zeroCount));
NGT::GraphIndex::showStatisticsOfGraph(outGraph);
// Phase 2: sort and deduplicate every adjacency list.
normalizeEdgeTimer.start();
for (size_t id = 1; id < outGraph.repository.size(); id++) {
try {
NGT::GraphNode &n = *outGraph.getNode(id);
if (id % 100000 == 0) {
// std::cerr << "Processed " << id << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Processed " + std::to_string(id));
}
std::sort(n.begin(), n.end());
NGT::ObjectID prev = 0;
for (auto it = n.begin(); it != n.end();) {
if (prev == (*it).id) {
it = n.erase(it);
continue;
}
prev = (*it).id;
it++;
}
// Copy-and-swap to release excess capacity left behind by erase().
NGT::GraphNode tmp = n;
n.swap(tmp);
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
normalizeEdgeTimer.stop();
NGT::GraphIndex::showStatisticsOfGraph(outGraph);
// Phase 3: re-add up to originalEdgeSize forward edges per node.
originalEdgeTimer.start();
for (size_t id = 1; id < outGraph.repository.size(); id++) {
if (id % 1000000 == 0) {
// std::cerr << "Processed " << id << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Processed " + std::to_string(id));
}
// NOTE(review): assumes graph has at least repository.size() - 1 entries;
// a shorter source graph would index out of bounds here -- confirm callers.
NGT::GraphNode &node = graph[id - 1];
try {
NGT::GraphNode &onode = *outGraph.getNode(id);
bool stop = false;
for (size_t rank = 0; (rank < node.size() && rank < originalEdgeSize) && stop == false; rank++) {
switch (mode) {
case 'a':
if (onode.size() >= originalEdgeSize) {
stop = true;
continue;
}
break;
case 'c':
break;
}
NGT::Distance distance = node[rank].distance;
size_t nodeID = node[rank].id;
outGraph.addEdge(id, nodeID, distance, false);
}
} catch(NGT::Exception &err) {
// std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("GraphReconstructor: Warning. Cannot get the node. ID=" + std::to_string(id) + ":" + err.what());
continue;
}
}
originalEdgeTimer.stop();
NGT::GraphIndex::showStatisticsOfGraph(outGraph);
// std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time
// << ":" << normalizeEdgeTimer.time << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Reconstruction time=" + std::to_string(originalEdgeTimer.time) + ":" + std::to_string(reverseEdgeTimer.time)
+ ":" + std::to_string(normalizeEdgeTimer.time));
#endif
}
// reconstruct a pseudo ANNG with a fewer edges from an actual ANNG with more edges.
// graph is a source ANNG
// index is an index with a reconstructed ANNG
// Each node keeps at most edgeSize undirected edges; an edge is added (in
// both adjacency lists) only from the endpoint with the larger ID, so each
// undirected edge is inserted exactly once. The source lists must be sorted
// by distance ascending, which is verified while scanning.
static
void reconstructANNGFromANNG(std::vector<NGT::ObjectDistances> &graph, NGT::Index &index, size_t edgeSize)
{
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("reconstructANNGFromANNG is not implemented.");
// std::cerr << "reconstructANNGFromANNG is not implemented." << std::endl;
abort();
#else
NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(index.getIndex());
// remove all edges in the index.
for (size_t id = 1; id < outGraph.repository.size(); id++) {
if (id % 1000000 == 0) {
// std::cerr << "Processed " << id << " nodes." << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("Processed " + std::to_string(id) + " nodes.");
}
try {
NGT::GraphNode &node = *outGraph.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
node.clear(outGraph.repository.allocator);
#else
// Swap with an empty node to actually release the capacity.
NGT::GraphNode empty;
node.swap(empty);
#endif
} catch(NGT::Exception &err) {
}
}
// Re-add up to edgeSize undirected edges per node, nearest first.
for (size_t id = 1; id <= graph.size(); ++id) {
size_t edgeCount = 0;
try {
NGT::ObjectDistances &node = graph[id - 1];
NGT::GraphNode &n = *outGraph.getNode(id);
NGT::Distance prevDistance = 0.0;
// All edges were cleared above, so this node must still be empty.
assert(n.size() == 0);
for (size_t i = 0; i < node.size(); ++i) {
NGT::Distance distance = node[i].distance;
// Verify the source list is sorted by distance ascending.
if (prevDistance > distance) {
NGTThrowException("Edge distance order is invalid");
}
prevDistance = distance;
size_t nodeID = node[i].id;
// Only the endpoint with the larger ID inserts the edge, so each
// undirected edge is added exactly once (into both lists).
if (node[i].id < id) {
try {
NGT::GraphNode &dn = *outGraph.getNode(nodeID);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
n.push_back(NGT::ObjectDistance(nodeID, distance), outGraph.repository.allocator);
dn.push_back(NGT::ObjectDistance(id, distance), outGraph.repository.allocator);
#else
n.push_back(NGT::ObjectDistance(nodeID, distance));
dn.push_back(NGT::ObjectDistance(id, distance));
#endif
} catch(...) {}
edgeCount++;
}
if (edgeCount >= edgeSize) {
break;
}
}
} catch(NGT::Exception &err) {
}
}
// Normalize: sort each adjacency list and drop duplicate neighbor IDs.
for (size_t id = 1; id < outGraph.repository.size(); id++) {
try {
NGT::GraphNode &n = *outGraph.getNode(id);
std::sort(n.begin(), n.end());
NGT::ObjectID prev = 0;
for (auto it = n.begin(); it != n.end();) {
if (prev == (*it).id) {
it = n.erase(it);
continue;
}
prev = (*it).id;
it++;
}
// Copy-and-swap to release excess capacity left behind by erase().
NGT::GraphNode tmp = n;
n.swap(tmp);
} catch (...) {
}
}
#endif
}
// Convenience wrapper around refineANNG that optionally silences the
// standard output streams (unlog == true) while the refinement runs.
// Parameters are forwarded unchanged to the main overload.
static void refineANNG(NGT::Index &index, bool unlog, float epsilon = 0.1, float accuracy = 0.0, int noOfEdges = 0, int exploreEdgeSize = INT_MIN, size_t batchSize = 10000) {
NGT::StdOstreamRedirector redirector(unlog);
redirector.begin();
try {
refineANNG(index, epsilon, accuracy, noOfEdges, exploreEdgeSize, batchSize);
} catch (NGT::Exception &err) {
// Restore the streams before propagating the error.
redirector.end();
throw(err);
}
// BUGFIX: the redirection was previously only ended on the exception path,
// leaving the streams redirected after a successful call.
// NOTE(review): assumes StdOstreamRedirector does not end the redirection
// in its destructor -- confirm against its implementation.
redirector.end();
}
// Refines an existing ANNG: for each object (in batches of batchSize) a
// search is run against the current index, the results become additional
// outgoing edges (sorted, deduplicated), and - when building an ANNG
// (noOfEdges == 0) - additional incoming edges. When noOfEdges != 0 the
// incoming-edge step is skipped and every node is finally pruned to
// |noOfEdges| neighbors to produce a kNNG.
static void refineANNG(NGT::Index &index, float epsilon = 0.1, float accuracy = 0.0, int noOfEdges = 0, int exploreEdgeSize = INT_MIN, size_t batchSize = 10000) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
NGTThrowException("GraphReconstructor::refineANNG: Not implemented for the shared memory option.");
#else
auto prop = static_cast<GraphIndex&>(index.getIndex()).getGraphProperty();
NGT::ObjectRepository &objectRepository = index.getObjectSpace().getRepository();
NGT::GraphIndex &graphIndex = static_cast<GraphIndex&>(index.getIndex());
size_t nOfObjects = objectRepository.size();
bool error = false;
std::string errorMessage;
for (size_t bid = 1; bid < nOfObjects; bid += batchSize) {
// NOTE(review): runtime-sized array of a non-POD type is a compiler
// extension (VLA); also the inner loops always run batchSize iterations,
// so in the last batch id = bid + idx can exceed nOfObjects - 1. This
// relies on objectRepository.isEmpty(id) being safe for out-of-range
// IDs -- confirm against ObjectRepository.
NGT::ObjectDistances results[batchSize];
// search
#pragma omp parallel for
for (size_t idx = 0; idx < batchSize; idx++) {
size_t id = bid + idx;
if (id % 100000 == 0) {
// std::cerr << "# of processed objects=" << id << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("# of processed objects=" + std::to_string(id));
}
if (objectRepository.isEmpty(id)) {
continue;
}
NGT::SearchContainer searchContainer(*objectRepository.get(id));
searchContainer.setResults(&results[idx]);
assert(prop.edgeSizeForCreation > 0);
// Request at least edgeSizeForCreation results (more if noOfEdges asks).
searchContainer.setSize(noOfEdges > prop.edgeSizeForCreation ? noOfEdges : prop.edgeSizeForCreation);
if (accuracy > 0.0) {
searchContainer.setExpectedAccuracy(accuracy);
} else {
searchContainer.setEpsilon(epsilon);
}
if (exploreEdgeSize != INT_MIN) {
searchContainer.setEdgeSize(exploreEdgeSize);
}
// After the first failure, remaining iterations skip the search; the
// error is reported once the parallel region finishes.
if (!error) {
try {
index.search(searchContainer);
} catch (NGT::Exception &err) {
#pragma omp critical
{
error = true;
errorMessage = err.what();
}
}
}
}
if (error) {
std::stringstream msg;
msg << "GraphReconstructor::refineANNG: " << errorMessage;
NGTThrowException(msg);
}
// outgoing edges
#pragma omp parallel for
for (size_t idx = 0; idx < batchSize; idx++) {
size_t id = bid + idx;
if (objectRepository.isEmpty(id)) {
continue;
}
NGT::GraphNode &node = *graphIndex.getNode(id);
// Append search results (excluding the node itself) as outgoing edges.
for (auto i = results[idx].begin(); i != results[idx].end(); ++i) {
if ((*i).id != id) {
node.push_back(*i);
}
}
std::sort(node.begin(), node.end());
// dedupe
ObjectID prev = 0;
for (GraphNode::iterator ni = node.begin(); ni != node.end();) {
if (prev == (*ni).id) {
ni = node.erase(ni);
continue;
}
prev = (*ni).id;
ni++;
}
}
// incomming edges
// When building a kNNG (noOfEdges != 0), skip adding incoming edges for
// this batch entirely.
if (noOfEdges != 0) {
continue;
}
for (size_t idx = 0; idx < batchSize; idx++) {
size_t id = bid + idx;
if (id % 10000 == 0) {
// std::cerr << "# of processed objects=" << id << std::endl;
if (NGT_LOG_DEBUG_)
(*NGT_LOG_DEBUG_)("# of processed objects=" + std::to_string(id));
}
for (auto i = results[idx].begin(); i != results[idx].end(); ++i) {
if ((*i).id != id) {
NGT::GraphNode &node = *graphIndex.getNode((*i).id);
graphIndex.addEdge(node, id, (*i).distance, false);
}
}
}
}
if (noOfEdges != 0) {
// prune to build knng
// A negative noOfEdges also means "kNNG with |noOfEdges| neighbors".
size_t nedges = noOfEdges < 0 ? -noOfEdges : noOfEdges;
#pragma omp parallel for
for (ObjectID id = 1; id < nOfObjects; ++id) {
if (objectRepository.isEmpty(id)) {
continue;
}
NGT::GraphNode &node = *graphIndex.getNode(id);
if (node.size() > nedges) {
node.resize(nedges);
}
}
}
#endif // defined(NGT_SHARED_MEMORY_ALLOCATOR)
}
};
}; // NGT
|
/* Sums the 64 elements of a into the scan variable while storing the
   inclusive prefix sums into b, using an OpenMP inscan reduction.
   Returns the total sum.  Serial semantics are identical when OpenMP
   is disabled. */
int
foo (int *a, int *b)
{
  int acc = 0;
  #pragma omp parallel for reduction (inscan, +:acc) default(none) firstprivate (a, b)
  for (int idx = 0; idx < 64; idx++)
    {
      acc += a[idx];
      #pragma omp scan inclusive (acc)
      b[idx] = acc;
    }
  return acc;
}
|
displacement_lagrangemultiplier_mixed_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierMixedContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierMixedContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierMixedContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierMixedContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
/// The epsilon tolerance definition
static constexpr double Tolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
* @param DispRatioTolerance Relative tolerance for displacement residual error
* @param DispAbsTolerance Absolute tolerance for displacement residual error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
* @param EnsureContact To check if the contact is lost
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementLagrangeMultiplierMixedContactCriteria(
const TDataType DispRatioTolerance,
const TDataType DispAbsTolerance,
const TDataType LMRatioTolerance,
const TDataType LMAbsTolerance,
const bool EnsureContact = false,
const bool PrintingOutput = false
)
: BaseType()
{
// Store the convergence tolerances
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
mLMRatioTolerance = LMRatioTolerance;
mLMAbsTolerance = LMAbsTolerance;
// Configure the local status flags; the table and the initial residual
// are both marked as not yet initialized
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, EnsureContact);
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementLagrangeMultiplierMixedContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
: BaseType()
{
// Complete the user-provided settings with the defaults
Parameters default_params = Parameters(R"(
{
"ensure_contact" : false,
"print_convergence_criterion" : false,
"residual_relative_tolerance" : 1.0e-4,
"residual_absolute_tolerance" : 1.0e-9,
"contact_displacement_relative_tolerance" : 1.0e-4,
"contact_displacement_absolute_tolerance" : 1.0e-9
})" );
ThisParameters.ValidateAndAssignDefaults(default_params);
// Configure the local status flags from the validated settings
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
// Tolerances for the displacement residual check
mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();
// Tolerances for the lagrange multiplier (contact) check
mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();
}
//* Copy constructor.
// NOTE(review): copies the base, flags, tolerances and the displacement
// residual norms; any other member state (e.g. the active-DoF bookkeeping
// used by PostCriteria) is default-constructed rather than copied --
// confirm this is intentional.
DisplacementLagrangeMultiplierMixedContactCriteria( DisplacementLagrangeMultiplierMixedContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
,mLMRatioTolerance(rOther.mLMRatioTolerance)
,mLMAbsTolerance(rOther.mLMAbsTolerance)
{
}
/// Destructor.
~DisplacementLagrangeMultiplierMixedContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
// Initialize
TDataType disp_residual_solution_norm = 0.0, lm_solution_norm = 0.0, lm_increase_norm = 0.0;
IndexType disp_dof_num(0),lm_dof_num(0);
// First iterator
const auto it_dof_begin = rDofSet.begin();
// Auxiliar values
std::size_t dof_id = 0;
TDataType residual_dof_value = 0.0, dof_value = 0.0, dof_incr = 0.0;
// The number of active dofs
const std::size_t number_active_dofs = rb.size();
// Loop over Dofs
#pragma omp parallel for firstprivate(dof_id, residual_dof_value, dof_value, dof_incr) reduction(+:disp_residual_solution_norm, lm_solution_norm, lm_increase_norm, disp_dof_num, lm_dof_num)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
dof_id = it_dof->EquationId();
// Check dof id is solved
if (dof_id < number_active_dofs) {
if (mActiveDofs[dof_id]) {
const auto& r_curr_var = it_dof->GetVariable();
if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
lm_solution_norm += dof_value * dof_value;
lm_increase_norm += dof_incr * dof_incr;
lm_dof_num++;
} else {
residual_dof_value = rb[dof_id];
disp_residual_solution_norm += residual_dof_value * residual_dof_value;
disp_dof_num++;
}
}
}
}
if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0;
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
mDispCurrentResidualNorm = disp_residual_solution_norm;
const TDataType lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0;
const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num);
TDataType residual_disp_ratio;
// We initialize the solution
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
mDispInitialResidualNorm = (disp_residual_solution_norm < Tolerance) ? 1.0 : disp_residual_solution_norm;
residual_disp_ratio = 1.0;
mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
}
// We calculate the ratio of the displacements
residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;
// We calculate the absolute norms
TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
// The process info of the model part
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("MIXED CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "MIXED CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tLAGRANGE MUL: RATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
}
}
}
r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > lm_ratio) ? residual_disp_ratio : lm_ratio;
r_process_info[RESIDUAL_NORM] = (lm_abs > mLMAbsTolerance) ? lm_abs : mLMAbsTolerance;
// We check if converged
const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance);
if ( disp_converged && lm_converged ) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FGRN(" Achieved"));
else
r_table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED(" Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is not achieved" << std::endl;
}
}
return false;
}
} else // In this case all the displacements are imposed!
return true;
}
/**
 * @brief Initializes the convergence criteria.
 * @details Marks the base criteria as initialized. If a TABLE_UTILITY is stored in the
 * ProcessInfo and the table header has not been registered yet, the columns for the
 * displacement and Lagrange-multiplier residual checks are added exactly once.
 * @param rModelPart Reference to the ModelPart containing the contact problem. (unused except for ProcessInfo access)
 */
void Initialize( ModelPart& rModelPart) override
{
    BaseType::mConvergenceCriteriaIsInitialized = true;

    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED)) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();
        // Four columns per residual block (ratio, expected ratio, absolute, expected absolute):
        // first the displacement residual, then the Lagrange multiplier residual
        for (const char* p_header : {"DP RATIO", "EXP. RAT", "ABS", "EXP. ABS", "LM RATIO", "EXP. RAT", "ABS", "EXP. ABS"})
            r_table.AddColumn(p_header, 10);
        // Final status column filled in by the convergence check ("Achieved"/"Not achieved")
        r_table.AddColumn("CONVERGENCE", 15);
        // Guard so the header is only registered once
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}
/**
 * @brief This function initializes the solution step
 * @details Resets the INITIAL_RESIDUAL_IS_SET flag so the first iteration of the new
 * step recomputes the reference residual norm, and recomputes which DoFs are active
 * (needed when multipoint constraints are present).
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables) (unused)
 * @param rb RHS vector (residual) (unused)
 */
void InitializeSolutionStep(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    // Initialize flag: force the reference residual to be recomputed this step
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

    // Filling mActiveDofs when MPC exist (marks which DoFs participate in the norms)
    ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual
TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM
TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM
std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierMixedContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the template static Kratos::Flags declared by the
// criteria class. Each flag occupies one bit position; the matching NOT_* flag is
// the same bit created in the "false" state, used to explicitly clear the option.
// Bit 0: require the LM norm check even when the LM solution norm vanishes
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
// Bit 1: plain-text output (no ANSI color/bold escape sequences)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
// Bit 2: table header columns already registered (set once in Initialize)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
// Bit 3: reference residual norm captured for the current step (reset each step)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H */
|
gridify-1.c | /* { dg-do compile } */
/* { dg-require-effective-target offload_hsa } */
/* { dg-options "-fopenmp -fdump-tree-omplower-details" } */
/* Simplest gridifiable shape: explicit target + teams (with thread_limit)
   + distribute-parallel-for over a unit-stride, zero-based loop.
   NOTE: the construct sequence must stay exactly as written — the omplower
   gridifier pattern-matches it, and the dg-final directive below counts
   four gridified kernels in this file.  */
void
foo1 (int n, int *a, int workgroup_size)
{
  int i;
#pragma omp target
#pragma omp teams thread_limit(workgroup_size)
#pragma omp distribute parallel for shared(a) firstprivate(n) private(i)
  for (i = 0; i < n; i++)
    a[i]++;
}
/* Combined "target teams" construct and a loop with a non-constant lower
   bound (j + 1): gridification must still be possible.  */
void
foo2 (int j, int n, int *a)
{
  int i;
#pragma omp target teams
#pragma omp distribute parallel for shared(a) firstprivate(n) private(i) firstprivate(j)
  for (i = j + 1; i < n; i++)
    a[i] = i;
}
/* As foo2, but with a non-unit loop step (i += 3): checks that a strided
   loop is still turned into a gridified kernel.  */
void
foo3 (int j, int n, int *a)
{
  int i;
#pragma omp target teams
#pragma omp distribute parallel for shared(a) firstprivate(n) private(i) firstprivate(j)
  for (i = j + 1; i < n; i += 3)
    a[i] = i;
}
/* As foo3, but with the target region nested inside host parallel/single
   constructs: the gridifier must still recognize the kernel shape.  */
void
foo4 (int j, int n, int *a)
{
#pragma omp parallel
  {
#pragma omp single
    {
      int i;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for shared(a) firstprivate(n) private(i) firstprivate(j)
      for (i = j + 1; i < n; i += 3)
        a[i] = i;
    }
  }
}
/* { dg-final { scan-tree-dump-times "Target construct will be turned into a gridified HSA kernel" 4 "omplower" } } */
|
b3v32ld.c | /**** BSIM3v3.2.4, Released by Xuemei Xi 12/21/2001 ****/
/**********
* Copyright 2001 Regents of the University of California. All rights reserved.
* File: b3ld.c of BSIM3v3.2.4
* Author: 1991 JianHui Huang and Min-Chie Jeng.
* Modified by Mansun Chan (1995).
* Author: 1997-1999 Weidong Liu.
* Author: 2001 Xuemei Xi
* Modified by Xuemei Xi, 10/05, 12/21, 2001.
* Modified by Paolo Nenzi 2002 and Dietmar Warning 2003
**********/
#include "ngspice/ngspice.h"
#include "ngspice/cktdefs.h"
#include "bsim3v32def.h"
#include "ngspice/trandefs.h"
#include "ngspice/const.h"
#include "ngspice/sperror.h"
#include "ngspice/devdefs.h"
#include "ngspice/suffix.h"
#define MAX_EXP 5.834617425e14
#define MIN_EXP 1.713908431e-15
#define EXP_THRESHOLD 34.0
#define EPSOX 3.453133e-11
#define EPSSI 1.03594e-10
#define Charge_q 1.60219e-19
#define DELTA_1 0.02
#define DELTA_2 0.02
#define DELTA_3 0.02
#define DELTA_4 0.02
#ifdef USE_OMP
int BSIM3v32LoadOMP(BSIM3v32instance *here, CKTcircuit *ckt);
void BSIM3v32LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt);
#endif
int
BSIM3v32load (GENmodel *inModel, CKTcircuit *ckt)
{
#ifdef USE_OMP
int idx;
BSIM3v32model *model = (BSIM3v32model*)inModel;
int error = 0;
BSIM3v32instance **InstArray;
InstArray = model->BSIM3v32InstanceArray;
#pragma omp parallel for
for (idx = 0; idx < model->BSIM3v32InstCount; idx++) {
BSIM3v32instance *here = InstArray[idx];
int local_error = BSIM3v32LoadOMP(here, ckt);
if (local_error)
error = local_error;
}
BSIM3v32LoadRhsMat(inModel, ckt);
return error;
}
int BSIM3v32LoadOMP(BSIM3v32instance *here, CKTcircuit *ckt) {
BSIM3v32model *model = BSIM3v32modPtr(here);
#else
BSIM3v32model *model = (BSIM3v32model*)inModel;
BSIM3v32instance *here;
#endif
double SourceSatCurrent, DrainSatCurrent;
double ag0, qgd, qgs, qgb, von, cbhat, VgstNVt, ExpVgst;
double cdrain, cdhat, cdreq, ceqbd, ceqbs, ceqqb, ceqqd, ceqqg, ceq, geq;
double czbd, czbdsw, czbdswg, czbs, czbssw, czbsswg, evbd, evbs, arg, sarg;
double delvbd, delvbs, delvds, delvgd, delvgs;
double Vfbeff, dVfbeff_dVg, dVfbeff_dVd = 0.0, dVfbeff_dVb, V3, V4;
double gcbdb, gcbgb, gcbsb, gcddb, gcdgb, gcdsb, gcgdb, gcggb, gcgsb, gcsdb;
#ifndef NEWCONV
double tol;
#endif
double gcsgb, gcssb, MJ, MJSW, MJSWG;
double vbd, vbs, vds, vgb, vgd, vgs, vgdo;
#ifndef PREDICTOR
double xfact;
#endif
double qgate = 0.0, qbulk = 0.0, qdrn = 0.0, qsrc;
double qinoi, cqgate, cqbulk, cqdrn;
double Vds, Vgs, Vbs, Gmbs, FwdSum, RevSum;
double Vgs_eff, Vfb, dVfb_dVb = 0.0, dVfb_dVd = 0.0;
double Phis, dPhis_dVb, sqrtPhis, dsqrtPhis_dVb, Vth, dVth_dVb, dVth_dVd;
double Vgst, dVgst_dVg, dVgst_dVb, dVgs_eff_dVg, Nvtm;
double Vtm;
double n, dn_dVb, dn_dVd, voffcv, noff, dnoff_dVd, dnoff_dVb;
double ExpArg, V0, CoxWLcen, QovCox, LINK;
double DeltaPhi, dDeltaPhi_dVg, dDeltaPhi_dVd, dDeltaPhi_dVb;
double Cox, Tox, Tcen, dTcen_dVg, dTcen_dVd, dTcen_dVb;
double Ccen, Coxeff, dCoxeff_dVg, dCoxeff_dVd, dCoxeff_dVb;
double Denomi, dDenomi_dVg, dDenomi_dVd, dDenomi_dVb;
double ueff, dueff_dVg, dueff_dVd, dueff_dVb;
double Esat, Vdsat;
double EsatL, dEsatL_dVg, dEsatL_dVd, dEsatL_dVb;
double dVdsat_dVg, dVdsat_dVb, dVdsat_dVd, Vasat, dAlphaz_dVg, dAlphaz_dVb;
double dVasat_dVg, dVasat_dVb, dVasat_dVd, Va, dVa_dVd, dVa_dVg, dVa_dVb;
double Vbseff, dVbseff_dVb, VbseffCV, dVbseffCV_dVb;
double Arg1, One_Third_CoxWL, Two_Third_CoxWL, Alphaz, CoxWL;
double T0, dT0_dVg, dT0_dVd, dT0_dVb;
double T1, dT1_dVg, dT1_dVd, dT1_dVb;
double T2, dT2_dVg, dT2_dVd, dT2_dVb;
double T3, dT3_dVg, dT3_dVd, dT3_dVb;
double T4;
double T5;
double T6;
double T7;
double T8;
double T9;
double T10;
double T11, T12;
double tmp, Abulk, dAbulk_dVb, Abulk0, dAbulk0_dVb;
double VACLM, dVACLM_dVg, dVACLM_dVd, dVACLM_dVb;
double VADIBL, dVADIBL_dVg, dVADIBL_dVd, dVADIBL_dVb;
double Xdep, dXdep_dVb, lt1, dlt1_dVb, ltw, dltw_dVb, Delt_vth, dDelt_vth_dVb;
double Theta0, dTheta0_dVb;
double TempRatio, tmp1, tmp2, tmp3, tmp4;
double DIBL_Sft, dDIBL_Sft_dVd, Lambda, dLambda_dVg;
double Idtot, Ibtot;
#ifndef NOBYPASS
double tempv;
#endif
double a1, ScalingFactor;
double Vgsteff, dVgsteff_dVg, dVgsteff_dVd, dVgsteff_dVb;
double Vdseff, dVdseff_dVg, dVdseff_dVd, dVdseff_dVb;
double VdseffCV, dVdseffCV_dVg, dVdseffCV_dVd, dVdseffCV_dVb;
double diffVds, dAbulk_dVg;
double beta, dbeta_dVg, dbeta_dVd, dbeta_dVb;
double gche, dgche_dVg, dgche_dVd, dgche_dVb;
double fgche1, dfgche1_dVg, dfgche1_dVd, dfgche1_dVb;
double fgche2, dfgche2_dVg, dfgche2_dVd, dfgche2_dVb;
double Idl, dIdl_dVg, dIdl_dVd, dIdl_dVb;
double Idsa, dIdsa_dVg, dIdsa_dVd, dIdsa_dVb;
double Ids, Gm, Gds, Gmb;
double Isub, Gbd, Gbg, Gbb;
double VASCBE, dVASCBE_dVg, dVASCBE_dVd, dVASCBE_dVb;
double CoxWovL;
double Rds, dRds_dVg, dRds_dVb, WVCox, WVCoxRds;
double Vgst2Vtm, VdsatCV, dVdsatCV_dVg, dVdsatCV_dVb;
double Leff, Weff, dWeff_dVg, dWeff_dVb;
double AbulkCV, dAbulkCV_dVb;
double qgdo, qgso, cgdo, cgso;
double qcheq = 0.0, qdef, gqdef = 0.0, cqdef, cqcheq, gtau_diff, gtau_drift;
double gcqdb = 0.0,gcqsb = 0.0, gcqgb = 0.0,gcqbb = 0.0;
double dxpart, sxpart, ggtg, ggtd, ggts, ggtb;
double ddxpart_dVd, ddxpart_dVg, ddxpart_dVb, ddxpart_dVs;
double dsxpart_dVd, dsxpart_dVg, dsxpart_dVb, dsxpart_dVs;
double gbspsp, gbbdp, gbbsp, gbspg, gbspb, gbspdp;
double gbdpdp, gbdpg, gbdpb, gbdpsp;
double Cgg, Cgd, Cgb, Cdg, Cdd, Cds;
double Csg, Csd, Css, Csb, Cbg, Cbd, Cbb;
double Cgg1, Cgb1, Cgd1, Cbg1, Cbb1, Cbd1, Qac0, Qsub0;
double dQac0_dVg, dQac0_dVd = 0.0, dQac0_dVb, dQsub0_dVg;
double dQsub0_dVd, dQsub0_dVb;
double m;
struct bsim3v32SizeDependParam *pParam;
int ByPass, Check, ChargeComputationNeeded, error;
ScalingFactor = 1.0e-9;
ChargeComputationNeeded =
((ckt->CKTmode & (MODEDCTRANCURVE | MODEAC | MODETRAN | MODEINITSMSIG)) ||
((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC)))
? 1 : 0;
#ifndef USE_OMP
for (; model != NULL; model = BSIM3v32nextModel(model))
{ for (here = BSIM3v32instances(model); here != NULL;
here = BSIM3v32nextInstance(here))
{
#endif
Check = 1;
ByPass = 0;
pParam = here->pParam;
if ((ckt->CKTmode & MODEINITSMSIG))
{ vbs = *(ckt->CKTstate0 + here->BSIM3v32vbs);
vgs = *(ckt->CKTstate0 + here->BSIM3v32vgs);
vds = *(ckt->CKTstate0 + here->BSIM3v32vds);
qdef = *(ckt->CKTstate0 + here->BSIM3v32qdef);
}
else if ((ckt->CKTmode & MODEINITTRAN))
{ vbs = *(ckt->CKTstate1 + here->BSIM3v32vbs);
vgs = *(ckt->CKTstate1 + here->BSIM3v32vgs);
vds = *(ckt->CKTstate1 + here->BSIM3v32vds);
qdef = *(ckt->CKTstate1 + here->BSIM3v32qdef);
}
else if ((ckt->CKTmode & MODEINITJCT) && !here->BSIM3v32off)
{ vds = model->BSIM3v32type * here->BSIM3v32icVDS;
vgs = model->BSIM3v32type * here->BSIM3v32icVGS;
vbs = model->BSIM3v32type * here->BSIM3v32icVBS;
qdef = 0.0;
if ((vds == 0.0) && (vgs == 0.0) && (vbs == 0.0) &&
((ckt->CKTmode & (MODETRAN | MODEAC|MODEDCOP |
MODEDCTRANCURVE)) || (!(ckt->CKTmode & MODEUIC))))
{ vbs = 0.0;
vgs = model->BSIM3v32type * here->BSIM3v32vth0 + 0.1;
vds = 0.1;
}
}
else if ((ckt->CKTmode & (MODEINITJCT | MODEINITFIX)) &&
(here->BSIM3v32off))
{ qdef = vbs = vgs = vds = 0.0;
}
else
{
#ifndef PREDICTOR
if ((ckt->CKTmode & MODEINITPRED))
{ xfact = ckt->CKTdelta / ckt->CKTdeltaOld[1];
*(ckt->CKTstate0 + here->BSIM3v32vbs) =
*(ckt->CKTstate1 + here->BSIM3v32vbs);
vbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3v32vbs))
- (xfact * (*(ckt->CKTstate2 + here->BSIM3v32vbs)));
*(ckt->CKTstate0 + here->BSIM3v32vgs) =
*(ckt->CKTstate1 + here->BSIM3v32vgs);
vgs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3v32vgs))
- (xfact * (*(ckt->CKTstate2 + here->BSIM3v32vgs)));
*(ckt->CKTstate0 + here->BSIM3v32vds) =
*(ckt->CKTstate1 + here->BSIM3v32vds);
vds = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3v32vds))
- (xfact * (*(ckt->CKTstate2 + here->BSIM3v32vds)));
*(ckt->CKTstate0 + here->BSIM3v32vbd) =
*(ckt->CKTstate0 + here->BSIM3v32vbs)
- *(ckt->CKTstate0 + here->BSIM3v32vds);
*(ckt->CKTstate0 + here->BSIM3v32qdef) =
*(ckt->CKTstate1 + here->BSIM3v32qdef);
qdef = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM3v32qdef))
-(xfact * (*(ckt->CKTstate2 + here->BSIM3v32qdef)));
}
else
{
#endif /* PREDICTOR */
vbs = model->BSIM3v32type
* (*(ckt->CKTrhsOld + here->BSIM3v32bNode)
- *(ckt->CKTrhsOld + here->BSIM3v32sNodePrime));
vgs = model->BSIM3v32type
* (*(ckt->CKTrhsOld + here->BSIM3v32gNode)
- *(ckt->CKTrhsOld + here->BSIM3v32sNodePrime));
vds = model->BSIM3v32type
* (*(ckt->CKTrhsOld + here->BSIM3v32dNodePrime)
- *(ckt->CKTrhsOld + here->BSIM3v32sNodePrime));
qdef = model->BSIM3v32type
* (*(ckt->CKTrhsOld + here->BSIM3v32qNode));
#ifndef PREDICTOR
}
#endif /* PREDICTOR */
vbd = vbs - vds;
vgd = vgs - vds;
vgdo = *(ckt->CKTstate0 + here->BSIM3v32vgs)
- *(ckt->CKTstate0 + here->BSIM3v32vds);
delvbs = vbs - *(ckt->CKTstate0 + here->BSIM3v32vbs);
delvbd = vbd - *(ckt->CKTstate0 + here->BSIM3v32vbd);
delvgs = vgs - *(ckt->CKTstate0 + here->BSIM3v32vgs);
delvds = vds - *(ckt->CKTstate0 + here->BSIM3v32vds);
delvgd = vgd - vgdo;
if (here->BSIM3v32mode >= 0)
{ Idtot = here->BSIM3v32cd + here->BSIM3v32csub - here->BSIM3v32cbd;
cdhat = Idtot - here->BSIM3v32gbd * delvbd
+ (here->BSIM3v32gmbs + here->BSIM3v32gbbs) * delvbs
+ (here->BSIM3v32gm + here->BSIM3v32gbgs) * delvgs
+ (here->BSIM3v32gds + here->BSIM3v32gbds) * delvds;
Ibtot = here->BSIM3v32cbs + here->BSIM3v32cbd - here->BSIM3v32csub;
cbhat = Ibtot + here->BSIM3v32gbd * delvbd
+ (here->BSIM3v32gbs - here->BSIM3v32gbbs) * delvbs
- here->BSIM3v32gbgs * delvgs
- here->BSIM3v32gbds * delvds;
}
else
{ Idtot = here->BSIM3v32cd - here->BSIM3v32cbd;
cdhat = Idtot - (here->BSIM3v32gbd - here->BSIM3v32gmbs) * delvbd
+ here->BSIM3v32gm * delvgd
- here->BSIM3v32gds * delvds;
Ibtot = here->BSIM3v32cbs + here->BSIM3v32cbd - here->BSIM3v32csub;
cbhat = Ibtot + here->BSIM3v32gbs * delvbs
+ (here->BSIM3v32gbd - here->BSIM3v32gbbs) * delvbd
- here->BSIM3v32gbgs * delvgd
+ here->BSIM3v32gbds * delvds;
}
#ifndef NOBYPASS
/* following should be one big if connected by && all over
* the place, but some C compilers can't handle that, so
* we split it up here to let them digest it in stages
*/
if ((!(ckt->CKTmode & MODEINITPRED)) && (ckt->CKTbypass))
if ((fabs(delvbs) < (ckt->CKTreltol * MAX(fabs(vbs),
fabs(*(ckt->CKTstate0+here->BSIM3v32vbs))) + ckt->CKTvoltTol)))
if ((fabs(delvbd) < (ckt->CKTreltol * MAX(fabs(vbd),
fabs(*(ckt->CKTstate0+here->BSIM3v32vbd))) + ckt->CKTvoltTol)))
if ((fabs(delvgs) < (ckt->CKTreltol * MAX(fabs(vgs),
fabs(*(ckt->CKTstate0+here->BSIM3v32vgs))) + ckt->CKTvoltTol)))
if ((fabs(delvds) < (ckt->CKTreltol * MAX(fabs(vds),
fabs(*(ckt->CKTstate0+here->BSIM3v32vds))) + ckt->CKTvoltTol)))
if ((fabs(cdhat - Idtot) < ckt->CKTreltol
* MAX(fabs(cdhat),fabs(Idtot)) + ckt->CKTabstol))
{ tempv = MAX(fabs(cbhat),fabs(Ibtot)) + ckt->CKTabstol;
if ((fabs(cbhat - Ibtot)) < ckt->CKTreltol * tempv)
{ /* bypass code */
vbs = *(ckt->CKTstate0 + here->BSIM3v32vbs);
vbd = *(ckt->CKTstate0 + here->BSIM3v32vbd);
vgs = *(ckt->CKTstate0 + here->BSIM3v32vgs);
vds = *(ckt->CKTstate0 + here->BSIM3v32vds);
qdef = *(ckt->CKTstate0 + here->BSIM3v32qdef);
vgd = vgs - vds;
vgb = vgs - vbs;
cdrain = here->BSIM3v32cd;
if ((ckt->CKTmode & (MODETRAN | MODEAC)) ||
((ckt->CKTmode & MODETRANOP) &&
(ckt->CKTmode & MODEUIC)))
{ ByPass = 1;
qgate = here->BSIM3v32qgate;
qbulk = here->BSIM3v32qbulk;
qdrn = here->BSIM3v32qdrn;
goto line755;
}
else
{ goto line850;
}
}
}
#endif /*NOBYPASS*/
von = here->BSIM3v32von;
if (*(ckt->CKTstate0 + here->BSIM3v32vds) >= 0.0)
{ vgs = DEVfetlim(vgs, *(ckt->CKTstate0+here->BSIM3v32vgs), von);
vds = vgs - vgd;
vds = DEVlimvds(vds, *(ckt->CKTstate0 + here->BSIM3v32vds));
vgd = vgs - vds;
}
else
{ vgd = DEVfetlim(vgd, vgdo, von);
vds = vgs - vgd;
vds = -DEVlimvds(-vds, -(*(ckt->CKTstate0+here->BSIM3v32vds)));
vgs = vgd + vds;
}
if (vds >= 0.0)
{ vbs = DEVpnjlim(vbs, *(ckt->CKTstate0 + here->BSIM3v32vbs),
CONSTvt0, model->BSIM3v32vcrit, &Check);
vbd = vbs - vds;
}
else
{ vbd = DEVpnjlim(vbd, *(ckt->CKTstate0 + here->BSIM3v32vbd),
CONSTvt0, model->BSIM3v32vcrit, &Check);
vbs = vbd + vds;
}
}
/* determine DC current and derivatives */
vbd = vbs - vds;
vgd = vgs - vds;
vgb = vgs - vbs;
/* Source/drain junction diode DC model begins */
Nvtm = model->BSIM3v32vtm * model->BSIM3v32jctEmissionCoeff;
/* acm model */
if (model->BSIM3v32acmMod == 0)
{
if ((here->BSIM3v32sourceArea <= 0.0)
&& (here->BSIM3v32sourcePerimeter <= 0.0))
{
SourceSatCurrent = 1.0e-14;
}
else
{
SourceSatCurrent = here->BSIM3v32sourceArea
* model->BSIM3v32jctTempSatCurDensity
+ here->BSIM3v32sourcePerimeter
* model->BSIM3v32jctSidewallTempSatCurDensity;
}
if ((here->BSIM3v32drainArea <= 0.0) && (here->BSIM3v32drainPerimeter <= 0.0))
{ DrainSatCurrent = 1.0e-14;
}
else
{ DrainSatCurrent = here->BSIM3v32drainArea
* model->BSIM3v32jctTempSatCurDensity
+ here->BSIM3v32drainPerimeter
* model->BSIM3v32jctSidewallTempSatCurDensity;
}
}
else
{
error = ACM_saturationCurrents(
model->BSIM3v32acmMod,
model->BSIM3v32calcacm,
here->BSIM3v32geo,
model->BSIM3v32hdif,
model->BSIM3v32wmlt,
here->BSIM3v32w,
model->BSIM3v32xw,
model->BSIM3v32jctTempSatCurDensity,
model->BSIM3v32jctSidewallTempSatCurDensity,
here->BSIM3v32drainAreaGiven,
here->BSIM3v32drainArea,
here->BSIM3v32drainPerimeterGiven,
here->BSIM3v32drainPerimeter,
here->BSIM3v32sourceAreaGiven,
here->BSIM3v32sourceArea,
here->BSIM3v32sourcePerimeterGiven,
here->BSIM3v32sourcePerimeter,
&DrainSatCurrent,
&SourceSatCurrent
);
if (error)
return(error);
}
if (SourceSatCurrent <= 0.0)
{ here->BSIM3v32gbs = ckt->CKTgmin;
here->BSIM3v32cbs = here->BSIM3v32gbs * vbs;
}
else
{ if (model->BSIM3v32ijth == 0.0)
{ evbs = exp(vbs / Nvtm);
here->BSIM3v32gbs = SourceSatCurrent * evbs / Nvtm + ckt->CKTgmin;
here->BSIM3v32cbs = SourceSatCurrent * (evbs - 1.0)
+ ckt->CKTgmin * vbs;
}
else
{ if (vbs < here->BSIM3v32vjsm)
{ evbs = exp(vbs / Nvtm);
here->BSIM3v32gbs = SourceSatCurrent * evbs / Nvtm + ckt->CKTgmin;
here->BSIM3v32cbs = SourceSatCurrent * (evbs - 1.0)
+ ckt->CKTgmin * vbs;
}
else
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
T0 = here->BSIM3v32IsEvjsm / Nvtm;
here->BSIM3v32gbs = T0 + (ckt->CKTgmin);
here->BSIM3v32cbs = here->BSIM3v32IsEvjsm - SourceSatCurrent
+ T0 * (vbs - here->BSIM3v32vjsm) + (ckt->CKTgmin) * vbs;
break;
case BSIM3v32V32:
default:
T0 = (SourceSatCurrent + model->BSIM3v32ijth) / Nvtm;
here->BSIM3v32gbs = T0 + (ckt->CKTgmin);
here->BSIM3v32cbs = model->BSIM3v32ijth + (ckt->CKTgmin) * vbs
+ T0 * (vbs - here->BSIM3v32vjsm);
}
}
}
}
if (DrainSatCurrent <= 0.0)
{ here->BSIM3v32gbd = ckt->CKTgmin;
here->BSIM3v32cbd = here->BSIM3v32gbd * vbd;
}
else
{ if (model->BSIM3v32ijth == 0.0)
{ evbd = exp(vbd / Nvtm);
here->BSIM3v32gbd = DrainSatCurrent * evbd / Nvtm + ckt->CKTgmin;
here->BSIM3v32cbd = DrainSatCurrent * (evbd - 1.0)
+ ckt->CKTgmin * vbd;
}
else
{ if (vbd < here->BSIM3v32vjdm)
{ evbd = exp(vbd / Nvtm);
here->BSIM3v32gbd = DrainSatCurrent * evbd / Nvtm + ckt->CKTgmin;
here->BSIM3v32cbd = DrainSatCurrent * (evbd - 1.0)
+ ckt->CKTgmin * vbd;
}
else
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
T0 = here->BSIM3v32IsEvjdm / Nvtm;
here->BSIM3v32gbd = T0 + (ckt->CKTgmin);
here->BSIM3v32cbd = here->BSIM3v32IsEvjdm - DrainSatCurrent
+ T0 * (vbd - here->BSIM3v32vjdm) + (ckt->CKTgmin) * vbd;
break;
case BSIM3v32V32:
default:
T0 = (DrainSatCurrent + model->BSIM3v32ijth) / Nvtm;
here->BSIM3v32gbd = T0 + (ckt->CKTgmin);
here->BSIM3v32cbd = model->BSIM3v32ijth + (ckt->CKTgmin) * vbd
+ T0 * (vbd - here->BSIM3v32vjdm);
}
}
}
}
/* End of diode DC model */
if (vds >= 0.0)
{ /* normal mode */
here->BSIM3v32mode = 1;
Vds = vds;
Vgs = vgs;
Vbs = vbs;
}
else
{ /* inverse mode */
here->BSIM3v32mode = -1;
Vds = -vds;
Vgs = vgd;
Vbs = vbd;
}
T0 = Vbs - pParam->BSIM3v32vbsc - 0.001;
T1 = sqrt(T0 * T0 - 0.004 * pParam->BSIM3v32vbsc);
Vbseff = pParam->BSIM3v32vbsc + 0.5 * (T0 + T1);
dVbseff_dVb = 0.5 * (1.0 + T0 / T1);
if (Vbseff < Vbs)
{ Vbseff = Vbs;
}
if (Vbseff > 0.0)
{ T0 = pParam->BSIM3v32phi / (pParam->BSIM3v32phi + Vbseff);
Phis = pParam->BSIM3v32phi * T0;
dPhis_dVb = -T0 * T0;
sqrtPhis = pParam->BSIM3v32phis3 / (pParam->BSIM3v32phi + 0.5 * Vbseff);
dsqrtPhis_dVb = -0.5 * sqrtPhis * sqrtPhis / pParam->BSIM3v32phis3;
}
else
{ Phis = pParam->BSIM3v32phi - Vbseff;
dPhis_dVb = -1.0;
sqrtPhis = sqrt(Phis);
dsqrtPhis_dVb = -0.5 / sqrtPhis;
}
Xdep = pParam->BSIM3v32Xdep0 * sqrtPhis / pParam->BSIM3v32sqrtPhi;
dXdep_dVb = (pParam->BSIM3v32Xdep0 / pParam->BSIM3v32sqrtPhi)
* dsqrtPhis_dVb;
Leff = pParam->BSIM3v32leff;
Vtm = model->BSIM3v32vtm;
/* Vth Calculation */
T3 = sqrt(Xdep);
V0 = pParam->BSIM3v32vbi - pParam->BSIM3v32phi;
T0 = pParam->BSIM3v32dvt2 * Vbseff;
if (T0 >= - 0.5)
{ T1 = 1.0 + T0;
T2 = pParam->BSIM3v32dvt2;
}
else /* Added to avoid any discontinuity problems caused by dvt2 */
{ T4 = 1.0 / (3.0 + 8.0 * T0);
T1 = (1.0 + 3.0 * T0) * T4;
T2 = pParam->BSIM3v32dvt2 * T4 * T4;
}
lt1 = model->BSIM3v32factor1 * T3 * T1;
dlt1_dVb = model->BSIM3v32factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2);
T0 = pParam->BSIM3v32dvt2w * Vbseff;
if (T0 >= - 0.5)
{ T1 = 1.0 + T0;
T2 = pParam->BSIM3v32dvt2w;
}
else /* Added to avoid any discontinuity problems caused by dvt2w */
{ T4 = 1.0 / (3.0 + 8.0 * T0);
T1 = (1.0 + 3.0 * T0) * T4;
T2 = pParam->BSIM3v32dvt2w * T4 * T4;
}
ltw = model->BSIM3v32factor1 * T3 * T1;
dltw_dVb = model->BSIM3v32factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2);
T0 = -0.5 * pParam->BSIM3v32dvt1 * Leff / lt1;
if (T0 > -EXP_THRESHOLD)
{ T1 = exp(T0);
Theta0 = T1 * (1.0 + 2.0 * T1);
dT1_dVb = -T0 / lt1 * T1 * dlt1_dVb;
dTheta0_dVb = (1.0 + 4.0 * T1) * dT1_dVb;
}
else
{ T1 = MIN_EXP;
Theta0 = T1 * (1.0 + 2.0 * T1);
dTheta0_dVb = 0.0;
}
here->BSIM3v32thetavth = pParam->BSIM3v32dvt0 * Theta0;
Delt_vth = here->BSIM3v32thetavth * V0;
dDelt_vth_dVb = pParam->BSIM3v32dvt0 * dTheta0_dVb * V0;
T0 = -0.5 * pParam->BSIM3v32dvt1w * pParam->BSIM3v32weff * Leff / ltw;
if (T0 > -EXP_THRESHOLD)
{ T1 = exp(T0);
T2 = T1 * (1.0 + 2.0 * T1);
dT1_dVb = -T0 / ltw * T1 * dltw_dVb;
dT2_dVb = (1.0 + 4.0 * T1) * dT1_dVb;
}
else
{ T1 = MIN_EXP;
T2 = T1 * (1.0 + 2.0 * T1);
dT2_dVb = 0.0;
}
T0 = pParam->BSIM3v32dvt0w * T2;
T2 = T0 * V0;
dT2_dVb = pParam->BSIM3v32dvt0w * dT2_dVb * V0;
TempRatio = ckt->CKTtemp / model->BSIM3v32tnom - 1.0;
T0 = sqrt(1.0 + pParam->BSIM3v32nlx / Leff);
T1 = pParam->BSIM3v32k1ox * (T0 - 1.0) * pParam->BSIM3v32sqrtPhi
+ (pParam->BSIM3v32kt1 + pParam->BSIM3v32kt1l / Leff
+ pParam->BSIM3v32kt2 * Vbseff) * TempRatio;
tmp2 = model->BSIM3v32tox * pParam->BSIM3v32phi
/ (pParam->BSIM3v32weff + pParam->BSIM3v32w0);
T3 = pParam->BSIM3v32eta0 + pParam->BSIM3v32etab * Vbseff;
if (T3 < 1.0e-4) /* avoid discontinuity problems caused by etab */
{ T9 = 1.0 / (3.0 - 2.0e4 * T3);
T3 = (2.0e-4 - T3) * T9;
T4 = T9 * T9;
}
else
{ T4 = 1.0;
}
dDIBL_Sft_dVd = T3 * pParam->BSIM3v32theta0vb0;
DIBL_Sft = dDIBL_Sft_dVd * Vds;
Vth = model->BSIM3v32type * here->BSIM3v32vth0 - pParam->BSIM3v32k1
* pParam->BSIM3v32sqrtPhi + pParam->BSIM3v32k1ox * sqrtPhis
- pParam->BSIM3v32k2ox * Vbseff - Delt_vth - T2 + (pParam->BSIM3v32k3
+ pParam->BSIM3v32k3b * Vbseff) * tmp2 + T1 - DIBL_Sft;
here->BSIM3v32von = Vth;
dVth_dVb = pParam->BSIM3v32k1ox * dsqrtPhis_dVb - pParam->BSIM3v32k2ox
- dDelt_vth_dVb - dT2_dVb + pParam->BSIM3v32k3b * tmp2
- pParam->BSIM3v32etab * Vds * pParam->BSIM3v32theta0vb0 * T4
+ pParam->BSIM3v32kt2 * TempRatio;
dVth_dVd = -dDIBL_Sft_dVd;
/* Calculate n */
tmp2 = pParam->BSIM3v32nfactor * EPSSI / Xdep;
tmp3 = pParam->BSIM3v32cdsc + pParam->BSIM3v32cdscb * Vbseff
+ pParam->BSIM3v32cdscd * Vds;
tmp4 = (tmp2 + tmp3 * Theta0 + pParam->BSIM3v32cit) / model->BSIM3v32cox;
if (tmp4 >= -0.5)
{ n = 1.0 + tmp4;
dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb
+ pParam->BSIM3v32cdscb * Theta0) / model->BSIM3v32cox;
dn_dVd = pParam->BSIM3v32cdscd * Theta0 / model->BSIM3v32cox;
}
else
/* avoid discontinuity problems caused by tmp4 */
{ T0 = 1.0 / (3.0 + 8.0 * tmp4);
n = (1.0 + 3.0 * tmp4) * T0;
T0 *= T0;
dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb
+ pParam->BSIM3v32cdscb * Theta0) / model->BSIM3v32cox * T0;
dn_dVd = pParam->BSIM3v32cdscd * Theta0 / model->BSIM3v32cox * T0;
}
/* Poly Gate Si Depletion Effect */
T0 = here->BSIM3v32vfb + pParam->BSIM3v32phi;
if ((pParam->BSIM3v32ngate > 1.e18) && (pParam->BSIM3v32ngate < 1.e25)
&& (Vgs > T0))
/* added to avoid the problem caused by ngate */
{ T1 = 1.0e6 * Charge_q * EPSSI * pParam->BSIM3v32ngate
/ (model->BSIM3v32cox * model->BSIM3v32cox);
T4 = sqrt(1.0 + 2.0 * (Vgs - T0) / T1);
T2 = T1 * (T4 - 1.0);
T3 = 0.5 * T2 * T2 / T1; /* T3 = Vpoly */
T7 = 1.12 - T3 - 0.05;
T6 = sqrt(T7 * T7 + 0.224);
T5 = 1.12 - 0.5 * (T7 + T6);
Vgs_eff = Vgs - T5;
dVgs_eff_dVg = 1.0 - (0.5 - 0.5 / T4) * (1.0 + T7 / T6);
}
else
{ Vgs_eff = Vgs;
dVgs_eff_dVg = 1.0;
}
Vgst = Vgs_eff - Vth;
/* Effective Vgst (Vgsteff) Calculation */
T10 = 2.0 * n * Vtm;
VgstNVt = Vgst / T10;
ExpArg = (2.0 * pParam->BSIM3v32voff - Vgst) / T10;
/* MCJ: Very small Vgst */
if (VgstNVt > EXP_THRESHOLD)
{ Vgsteff = Vgst;
dVgsteff_dVg = dVgs_eff_dVg;
dVgsteff_dVd = -dVth_dVd;
dVgsteff_dVb = -dVth_dVb;
}
else if (ExpArg > EXP_THRESHOLD)
{ T0 = (Vgst - pParam->BSIM3v32voff) / (n * Vtm);
ExpVgst = exp(T0);
Vgsteff = Vtm * pParam->BSIM3v32cdep0 / model->BSIM3v32cox * ExpVgst;
dVgsteff_dVg = Vgsteff / (n * Vtm);
dVgsteff_dVd = -dVgsteff_dVg * (dVth_dVd + T0 * Vtm * dn_dVd);
dVgsteff_dVb = -dVgsteff_dVg * (dVth_dVb + T0 * Vtm * dn_dVb);
dVgsteff_dVg *= dVgs_eff_dVg;
}
else
{ ExpVgst = exp(VgstNVt);
T1 = T10 * log(1.0 + ExpVgst);
dT1_dVg = ExpVgst / (1.0 + ExpVgst);
dT1_dVb = -dT1_dVg * (dVth_dVb + Vgst / n * dn_dVb)
+ T1 / n * dn_dVb;
dT1_dVd = -dT1_dVg * (dVth_dVd + Vgst / n * dn_dVd)
+ T1 / n * dn_dVd;
dT2_dVg = -model->BSIM3v32cox / (Vtm * pParam->BSIM3v32cdep0)
* exp(ExpArg);
T2 = 1.0 - T10 * dT2_dVg;
dT2_dVd = -dT2_dVg * (dVth_dVd - 2.0 * Vtm * ExpArg * dn_dVd)
+ (T2 - 1.0) / n * dn_dVd;
dT2_dVb = -dT2_dVg * (dVth_dVb - 2.0 * Vtm * ExpArg * dn_dVb)
+ (T2 - 1.0) / n * dn_dVb;
Vgsteff = T1 / T2;
T3 = T2 * T2;
dVgsteff_dVg = (T2 * dT1_dVg - T1 * dT2_dVg) / T3 * dVgs_eff_dVg;
dVgsteff_dVd = (T2 * dT1_dVd - T1 * dT2_dVd) / T3;
dVgsteff_dVb = (T2 * dT1_dVb - T1 * dT2_dVb) / T3;
}
/* Added revision dependent code */
if (model->BSIM3v32intVersion > BSIM3v32V323) {
here->BSIM3v32Vgsteff = Vgsteff;
}
/* Calculate Effective Channel Geometry */
T9 = sqrtPhis - pParam->BSIM3v32sqrtPhi;
Weff = pParam->BSIM3v32weff - 2.0 * (pParam->BSIM3v32dwg * Vgsteff
+ pParam->BSIM3v32dwb * T9);
dWeff_dVg = -2.0 * pParam->BSIM3v32dwg;
dWeff_dVb = -2.0 * pParam->BSIM3v32dwb * dsqrtPhis_dVb;
if (Weff < 2.0e-8) /* to avoid the discontinuity problem due to Weff*/
{ T0 = 1.0 / (6.0e-8 - 2.0 * Weff);
Weff = 2.0e-8 * (4.0e-8 - Weff) * T0;
T0 *= T0 * 4.0e-16;
dWeff_dVg *= T0;
dWeff_dVb *= T0;
}
T0 = pParam->BSIM3v32prwg * Vgsteff + pParam->BSIM3v32prwb * T9;
if (T0 >= -0.9)
{ Rds = pParam->BSIM3v32rds0 * (1.0 + T0);
dRds_dVg = pParam->BSIM3v32rds0 * pParam->BSIM3v32prwg;
dRds_dVb = pParam->BSIM3v32rds0 * pParam->BSIM3v32prwb * dsqrtPhis_dVb;
}
else
/* to avoid the discontinuity problem due to prwg and prwb*/
{ T1 = 1.0 / (17.0 + 20.0 * T0);
Rds = pParam->BSIM3v32rds0 * (0.8 + T0) * T1;
T1 *= T1;
dRds_dVg = pParam->BSIM3v32rds0 * pParam->BSIM3v32prwg * T1;
dRds_dVb = pParam->BSIM3v32rds0 * pParam->BSIM3v32prwb * dsqrtPhis_dVb
* T1;
}
/* Added revision dependent code */
if (model->BSIM3v32intVersion > BSIM3v32V323) {
here->BSIM3v32rds = Rds; /* Noise Bugfix */
}
/* Calculate Abulk */
T1 = 0.5 * pParam->BSIM3v32k1ox / sqrtPhis;
dT1_dVb = -T1 / sqrtPhis * dsqrtPhis_dVb;
T9 = sqrt(pParam->BSIM3v32xj * Xdep);
tmp1 = Leff + 2.0 * T9;
T5 = Leff / tmp1;
tmp2 = pParam->BSIM3v32a0 * T5;
tmp3 = pParam->BSIM3v32weff + pParam->BSIM3v32b1;
tmp4 = pParam->BSIM3v32b0 / tmp3;
T2 = tmp2 + tmp4;
dT2_dVb = -T9 / tmp1 / Xdep * dXdep_dVb;
T6 = T5 * T5;
T7 = T5 * T6;
Abulk0 = 1.0 + T1 * T2;
dAbulk0_dVb = T1 * tmp2 * dT2_dVb + T2 * dT1_dVb;
T8 = pParam->BSIM3v32ags * pParam->BSIM3v32a0 * T7;
dAbulk_dVg = -T1 * T8;
Abulk = Abulk0 + dAbulk_dVg * Vgsteff;
dAbulk_dVb = dAbulk0_dVb - T8 * Vgsteff * (dT1_dVb
+ 3.0 * T1 * dT2_dVb);
if (Abulk0 < 0.1) /* added to avoid the problems caused by Abulk0 */
{ T9 = 1.0 / (3.0 - 20.0 * Abulk0);
Abulk0 = (0.2 - Abulk0) * T9;
dAbulk0_dVb *= T9 * T9;
}
if (Abulk < 0.1)
/* added to avoid the problems caused by Abulk */
{ T9 = 1.0 / (3.0 - 20.0 * Abulk);
Abulk = (0.2 - Abulk) * T9;
/* Added revision dependent code */
if (model->BSIM3v32intVersion > BSIM3v32V32) {
T10 = T9 * T9;
dAbulk_dVb *= T10;
dAbulk_dVg *= T10;
} else {
dAbulk_dVb *= T9 * T9;
}
}
/* Added revision dependent code */
if (model->BSIM3v32intVersion > BSIM3v32V323) {
here->BSIM3v32Abulk = Abulk;
}
T2 = pParam->BSIM3v32keta * Vbseff;
if (T2 >= -0.9)
{ T0 = 1.0 / (1.0 + T2);
dT0_dVb = -pParam->BSIM3v32keta * T0 * T0;
}
else
/* added to avoid the problems caused by Keta */
{ T1 = 1.0 / (0.8 + T2);
T0 = (17.0 + 20.0 * T2) * T1;
dT0_dVb = -pParam->BSIM3v32keta * T1 * T1;
}
dAbulk_dVg *= T0;
dAbulk_dVb = dAbulk_dVb * T0 + Abulk * dT0_dVb;
dAbulk0_dVb = dAbulk0_dVb * T0 + Abulk0 * dT0_dVb;
Abulk *= T0;
Abulk0 *= T0;
/* Mobility calculation */
if (model->BSIM3v32mobMod == 1)
{ T0 = Vgsteff + Vth + Vth;
T2 = pParam->BSIM3v32ua + pParam->BSIM3v32uc * Vbseff;
T3 = T0 / model->BSIM3v32tox;
T5 = T3 * (T2 + pParam->BSIM3v32ub * T3);
dDenomi_dVg = (T2 + 2.0 * pParam->BSIM3v32ub * T3) / model->BSIM3v32tox;
dDenomi_dVd = dDenomi_dVg * 2.0 * dVth_dVd;
dDenomi_dVb = dDenomi_dVg * 2.0 * dVth_dVb + pParam->BSIM3v32uc * T3;
}
else if (model->BSIM3v32mobMod == 2)
{ T5 = Vgsteff / model->BSIM3v32tox * (pParam->BSIM3v32ua
+ pParam->BSIM3v32uc * Vbseff + pParam->BSIM3v32ub * Vgsteff
/ model->BSIM3v32tox);
dDenomi_dVg = (pParam->BSIM3v32ua + pParam->BSIM3v32uc * Vbseff
+ 2.0 * pParam->BSIM3v32ub * Vgsteff / model->BSIM3v32tox)
/ model->BSIM3v32tox;
dDenomi_dVd = 0.0;
dDenomi_dVb = Vgsteff * pParam->BSIM3v32uc / model->BSIM3v32tox;
}
else
{ T0 = Vgsteff + Vth + Vth;
T2 = 1.0 + pParam->BSIM3v32uc * Vbseff;
T3 = T0 / model->BSIM3v32tox;
T4 = T3 * (pParam->BSIM3v32ua + pParam->BSIM3v32ub * T3);
T5 = T4 * T2;
dDenomi_dVg = (pParam->BSIM3v32ua + 2.0 * pParam->BSIM3v32ub * T3) * T2
/ model->BSIM3v32tox;
dDenomi_dVd = dDenomi_dVg * 2.0 * dVth_dVd;
dDenomi_dVb = dDenomi_dVg * 2.0 * dVth_dVb + pParam->BSIM3v32uc * T4;
}
if (T5 >= -0.8)
{ Denomi = 1.0 + T5;
}
else /* Added to avoid the discontinuity problem caused by ua and ub*/
{ T9 = 1.0 / (7.0 + 10.0 * T5);
Denomi = (0.6 + T5) * T9;
T9 *= T9;
dDenomi_dVg *= T9;
dDenomi_dVd *= T9;
dDenomi_dVb *= T9;
}
here->BSIM3v32ueff = ueff = here->BSIM3v32u0temp / Denomi;
T9 = -ueff / Denomi;
dueff_dVg = T9 * dDenomi_dVg;
dueff_dVd = T9 * dDenomi_dVd;
dueff_dVb = T9 * dDenomi_dVb;
/* Saturation Drain Voltage Vdsat */
WVCox = Weff * pParam->BSIM3v32vsattemp * model->BSIM3v32cox;
WVCoxRds = WVCox * Rds;
Esat = 2.0 * pParam->BSIM3v32vsattemp / ueff;
EsatL = Esat * Leff;
T0 = -EsatL /ueff;
dEsatL_dVg = T0 * dueff_dVg;
dEsatL_dVd = T0 * dueff_dVd;
dEsatL_dVb = T0 * dueff_dVb;
/* Sqrt() */
a1 = pParam->BSIM3v32a1;
if (a1 == 0.0)
{ Lambda = pParam->BSIM3v32a2;
dLambda_dVg = 0.0;
}
else if (a1 > 0.0)
/* Added to avoid the discontinuity problem
caused by a1 and a2 (Lambda) */
{ T0 = 1.0 - pParam->BSIM3v32a2;
T1 = T0 - pParam->BSIM3v32a1 * Vgsteff - 0.0001;
T2 = sqrt(T1 * T1 + 0.0004 * T0);
Lambda = pParam->BSIM3v32a2 + T0 - 0.5 * (T1 + T2);
dLambda_dVg = 0.5 * pParam->BSIM3v32a1 * (1.0 + T1 / T2);
}
else
{ T1 = pParam->BSIM3v32a2 + pParam->BSIM3v32a1 * Vgsteff - 0.0001;
T2 = sqrt(T1 * T1 + 0.0004 * pParam->BSIM3v32a2);
Lambda = 0.5 * (T1 + T2);
dLambda_dVg = 0.5 * pParam->BSIM3v32a1 * (1.0 + T1 / T2);
}
Vgst2Vtm = Vgsteff + 2.0 * Vtm;
/* Added revision dependent code */
if (model->BSIM3v32intVersion > BSIM3v32V323) {
here->BSIM3v32AbovVgst2Vtm = Abulk / Vgst2Vtm;
}
if (Rds > 0)
{ tmp2 = dRds_dVg / Rds + dWeff_dVg / Weff;
tmp3 = dRds_dVb / Rds + dWeff_dVb / Weff;
}
else
{ tmp2 = dWeff_dVg / Weff;
tmp3 = dWeff_dVb / Weff;
}
if ((Rds == 0.0) && (Lambda == 1.0))
{ T0 = 1.0 / (Abulk * EsatL + Vgst2Vtm);
tmp1 = 0.0;
T1 = T0 * T0;
T2 = Vgst2Vtm * T0;
T3 = EsatL * Vgst2Vtm;
Vdsat = T3 * T0;
dT0_dVg = -(Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 1.0) * T1;
dT0_dVd = -(Abulk * dEsatL_dVd) * T1;
dT0_dVb = -(Abulk * dEsatL_dVb + dAbulk_dVb * EsatL) * T1;
dVdsat_dVg = T3 * dT0_dVg + T2 * dEsatL_dVg + EsatL * T0;
dVdsat_dVd = T3 * dT0_dVd + T2 * dEsatL_dVd;
dVdsat_dVb = T3 * dT0_dVb + T2 * dEsatL_dVb;
}
else
{ tmp1 = dLambda_dVg / (Lambda * Lambda);
T9 = Abulk * WVCoxRds;
T8 = Abulk * T9;
T7 = Vgst2Vtm * T9;
T6 = Vgst2Vtm * WVCoxRds;
T0 = 2.0 * Abulk * (T9 - 1.0 + 1.0 / Lambda);
dT0_dVg = 2.0 * (T8 * tmp2 - Abulk * tmp1
+ (2.0 * T9 + 1.0 / Lambda - 1.0) * dAbulk_dVg);
dT0_dVb = 2.0 * (T8 * (2.0 / Abulk * dAbulk_dVb + tmp3)
+ (1.0 / Lambda - 1.0) * dAbulk_dVb);
dT0_dVd = 0.0;
T1 = Vgst2Vtm * (2.0 / Lambda - 1.0) + Abulk * EsatL + 3.0 * T7;
dT1_dVg = (2.0 / Lambda - 1.0) - 2.0 * Vgst2Vtm * tmp1
+ Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 3.0 * (T9
+ T7 * tmp2 + T6 * dAbulk_dVg);
dT1_dVb = Abulk * dEsatL_dVb + EsatL * dAbulk_dVb
+ 3.0 * (T6 * dAbulk_dVb + T7 * tmp3);
dT1_dVd = Abulk * dEsatL_dVd;
T2 = Vgst2Vtm * (EsatL + 2.0 * T6);
dT2_dVg = EsatL + Vgst2Vtm * dEsatL_dVg
+ T6 * (4.0 + 2.0 * Vgst2Vtm * tmp2);
dT2_dVb = Vgst2Vtm * (dEsatL_dVb + 2.0 * T6 * tmp3);
dT2_dVd = Vgst2Vtm * dEsatL_dVd;
T3 = sqrt(T1 * T1 - 2.0 * T0 * T2);
Vdsat = (T1 - T3) / T0;
dT3_dVg = (T1 * dT1_dVg - 2.0 * (T0 * dT2_dVg + T2 * dT0_dVg))
/ T3;
dT3_dVd = (T1 * dT1_dVd - 2.0 * (T0 * dT2_dVd + T2 * dT0_dVd))
/ T3;
dT3_dVb = (T1 * dT1_dVb - 2.0 * (T0 * dT2_dVb + T2 * dT0_dVb))
/ T3;
dVdsat_dVg = (dT1_dVg - (T1 * dT1_dVg - dT0_dVg * T2
- T0 * dT2_dVg) / T3 - Vdsat * dT0_dVg) / T0;
dVdsat_dVb = (dT1_dVb - (T1 * dT1_dVb - dT0_dVb * T2
- T0 * dT2_dVb) / T3 - Vdsat * dT0_dVb) / T0;
dVdsat_dVd = (dT1_dVd - (T1 * dT1_dVd - T0 * dT2_dVd) / T3) / T0;
}
here->BSIM3v32vdsat = Vdsat;
/* Effective Vds (Vdseff) Calculation */
T1 = Vdsat - Vds - pParam->BSIM3v32delta;
dT1_dVg = dVdsat_dVg;
dT1_dVd = dVdsat_dVd - 1.0;
dT1_dVb = dVdsat_dVb;
T2 = sqrt(T1 * T1 + 4.0 * pParam->BSIM3v32delta * Vdsat);
T0 = T1 / T2;
T3 = 2.0 * pParam->BSIM3v32delta / T2;
dT2_dVg = T0 * dT1_dVg + T3 * dVdsat_dVg;
dT2_dVd = T0 * dT1_dVd + T3 * dVdsat_dVd;
dT2_dVb = T0 * dT1_dVb + T3 * dVdsat_dVb;
Vdseff = Vdsat - 0.5 * (T1 + T2);
dVdseff_dVg = dVdsat_dVg - 0.5 * (dT1_dVg + dT2_dVg);
dVdseff_dVd = dVdsat_dVd - 0.5 * (dT1_dVd + dT2_dVd);
dVdseff_dVb = dVdsat_dVb - 0.5 * (dT1_dVb + dT2_dVb);
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
/* Added to eliminate non-zero Vdseff at Vds=0.0 */
if (Vds == 0.0)
{
Vdseff = 0.0;
dVdseff_dVg = 0.0;
dVdseff_dVb = 0.0;
}
break;
case BSIM3v32V32:
default:
/* Do nothing */
break;
}
/* Calculate VAsat */
tmp4 = 1.0 - 0.5 * Abulk * Vdsat / Vgst2Vtm;
T9 = WVCoxRds * Vgsteff;
T8 = T9 / Vgst2Vtm;
T0 = EsatL + Vdsat + 2.0 * T9 * tmp4;
T7 = 2.0 * WVCoxRds * tmp4;
dT0_dVg = dEsatL_dVg + dVdsat_dVg + T7 * (1.0 + tmp2 * Vgsteff)
- T8 * (Abulk * dVdsat_dVg - Abulk * Vdsat / Vgst2Vtm
+ Vdsat * dAbulk_dVg);
dT0_dVb = dEsatL_dVb + dVdsat_dVb + T7 * tmp3 * Vgsteff
- T8 * (dAbulk_dVb * Vdsat + Abulk * dVdsat_dVb);
dT0_dVd = dEsatL_dVd + dVdsat_dVd - T8 * Abulk * dVdsat_dVd;
T9 = WVCoxRds * Abulk;
T1 = 2.0 / Lambda - 1.0 + T9;
dT1_dVg = -2.0 * tmp1 + WVCoxRds * (Abulk * tmp2 + dAbulk_dVg);
dT1_dVb = dAbulk_dVb * WVCoxRds + T9 * tmp3;
Vasat = T0 / T1;
dVasat_dVg = (dT0_dVg - Vasat * dT1_dVg) / T1;
dVasat_dVb = (dT0_dVb - Vasat * dT1_dVb) / T1;
dVasat_dVd = dT0_dVd / T1;
if (Vdseff > Vds)
Vdseff = Vds;
diffVds = Vds - Vdseff;
/* Added revision dependent code */
if (model->BSIM3v32intVersion > BSIM3v32V323) {
here->BSIM3v32Vdseff = Vdseff;
}
/* Calculate VACLM */
if ((pParam->BSIM3v32pclm > 0.0) && (diffVds > 1.0e-10))
{ T0 = 1.0 / (pParam->BSIM3v32pclm * Abulk * pParam->BSIM3v32litl);
dT0_dVb = -T0 / Abulk * dAbulk_dVb;
dT0_dVg = -T0 / Abulk * dAbulk_dVg;
T2 = Vgsteff / EsatL;
T1 = Leff * (Abulk + T2);
dT1_dVg = Leff * ((1.0 - T2 * dEsatL_dVg) / EsatL + dAbulk_dVg);
dT1_dVb = Leff * (dAbulk_dVb - T2 * dEsatL_dVb / EsatL);
dT1_dVd = -T2 * dEsatL_dVd / Esat;
T9 = T0 * T1;
VACLM = T9 * diffVds;
dVACLM_dVg = T0 * dT1_dVg * diffVds - T9 * dVdseff_dVg
+ T1 * diffVds * dT0_dVg;
dVACLM_dVb = (dT0_dVb * T1 + T0 * dT1_dVb) * diffVds
- T9 * dVdseff_dVb;
dVACLM_dVd = T0 * dT1_dVd * diffVds + T9 * (1.0 - dVdseff_dVd);
}
else
{ VACLM = MAX_EXP;
dVACLM_dVd = dVACLM_dVg = dVACLM_dVb = 0.0;
}
/* Calculate VADIBL */
if (pParam->BSIM3v32thetaRout > 0.0)
{ T8 = Abulk * Vdsat;
T0 = Vgst2Vtm * T8;
dT0_dVg = Vgst2Vtm * Abulk * dVdsat_dVg + T8
+ Vgst2Vtm * Vdsat * dAbulk_dVg;
dT0_dVb = Vgst2Vtm * (dAbulk_dVb * Vdsat + Abulk * dVdsat_dVb);
dT0_dVd = Vgst2Vtm * Abulk * dVdsat_dVd;
T1 = Vgst2Vtm + T8;
dT1_dVg = 1.0 + Abulk * dVdsat_dVg + Vdsat * dAbulk_dVg;
dT1_dVb = Abulk * dVdsat_dVb + dAbulk_dVb * Vdsat;
dT1_dVd = Abulk * dVdsat_dVd;
T9 = T1 * T1;
T2 = pParam->BSIM3v32thetaRout;
VADIBL = (Vgst2Vtm - T0 / T1) / T2;
dVADIBL_dVg = (1.0 - dT0_dVg / T1 + T0 * dT1_dVg / T9) / T2;
dVADIBL_dVb = (-dT0_dVb / T1 + T0 * dT1_dVb / T9) / T2;
dVADIBL_dVd = (-dT0_dVd / T1 + T0 * dT1_dVd / T9) / T2;
T7 = pParam->BSIM3v32pdiblb * Vbseff;
if (T7 >= -0.9)
{ T3 = 1.0 / (1.0 + T7);
VADIBL *= T3;
dVADIBL_dVg *= T3;
dVADIBL_dVb = (dVADIBL_dVb - VADIBL * pParam->BSIM3v32pdiblb)
* T3;
dVADIBL_dVd *= T3;
}
else
/* Added to avoid the discontinuity problem caused by pdiblcb */
{ T4 = 1.0 / (0.8 + T7);
T3 = (17.0 + 20.0 * T7) * T4;
dVADIBL_dVg *= T3;
dVADIBL_dVb = dVADIBL_dVb * T3
- VADIBL * pParam->BSIM3v32pdiblb * T4 * T4;
dVADIBL_dVd *= T3;
VADIBL *= T3;
}
}
else
{ VADIBL = MAX_EXP;
dVADIBL_dVd = dVADIBL_dVg = dVADIBL_dVb = 0.0;
}
/* Calculate VA */
T8 = pParam->BSIM3v32pvag / EsatL;
T9 = T8 * Vgsteff;
if (T9 > -0.9)
{ T0 = 1.0 + T9;
dT0_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL);
dT0_dVb = -T9 * dEsatL_dVb / EsatL;
dT0_dVd = -T9 * dEsatL_dVd / EsatL;
}
else /* Added to avoid the discontinuity problems caused by pvag */
{ T1 = 1.0 / (17.0 + 20.0 * T9);
T0 = (0.8 + T9) * T1;
T1 *= T1;
dT0_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL) * T1;
T9 *= T1 / EsatL;
dT0_dVb = -T9 * dEsatL_dVb;
dT0_dVd = -T9 * dEsatL_dVd;
}
tmp1 = VACLM * VACLM;
tmp2 = VADIBL * VADIBL;
tmp3 = VACLM + VADIBL;
T1 = VACLM * VADIBL / tmp3;
tmp3 *= tmp3;
dT1_dVg = (tmp1 * dVADIBL_dVg + tmp2 * dVACLM_dVg) / tmp3;
dT1_dVd = (tmp1 * dVADIBL_dVd + tmp2 * dVACLM_dVd) / tmp3;
dT1_dVb = (tmp1 * dVADIBL_dVb + tmp2 * dVACLM_dVb) / tmp3;
Va = Vasat + T0 * T1;
dVa_dVg = dVasat_dVg + T1 * dT0_dVg + T0 * dT1_dVg;
dVa_dVd = dVasat_dVd + T1 * dT0_dVd + T0 * dT1_dVd;
dVa_dVb = dVasat_dVb + T1 * dT0_dVb + T0 * dT1_dVb;
/* Calculate VASCBE */
if (pParam->BSIM3v32pscbe2 > 0.0)
{ if (diffVds > pParam->BSIM3v32pscbe1 * pParam->BSIM3v32litl
/ EXP_THRESHOLD)
{ T0 = pParam->BSIM3v32pscbe1 * pParam->BSIM3v32litl / diffVds;
VASCBE = Leff * exp(T0) / pParam->BSIM3v32pscbe2;
T1 = T0 * VASCBE / diffVds;
dVASCBE_dVg = T1 * dVdseff_dVg;
dVASCBE_dVd = -T1 * (1.0 - dVdseff_dVd);
dVASCBE_dVb = T1 * dVdseff_dVb;
}
else
{ VASCBE = MAX_EXP * Leff/pParam->BSIM3v32pscbe2;
dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0;
}
}
else
{ VASCBE = MAX_EXP;
dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0;
}
/* Calculate Ids */
CoxWovL = model->BSIM3v32cox * Weff / Leff;
beta = ueff * CoxWovL;
dbeta_dVg = CoxWovL * dueff_dVg + beta * dWeff_dVg / Weff;
dbeta_dVd = CoxWovL * dueff_dVd;
dbeta_dVb = CoxWovL * dueff_dVb + beta * dWeff_dVb / Weff;
T0 = 1.0 - 0.5 * Abulk * Vdseff / Vgst2Vtm;
dT0_dVg = -0.5 * (Abulk * dVdseff_dVg
- Abulk * Vdseff / Vgst2Vtm + Vdseff * dAbulk_dVg) / Vgst2Vtm;
dT0_dVd = -0.5 * Abulk * dVdseff_dVd / Vgst2Vtm;
dT0_dVb = -0.5 * (Abulk * dVdseff_dVb + dAbulk_dVb * Vdseff)
/ Vgst2Vtm;
fgche1 = Vgsteff * T0;
dfgche1_dVg = Vgsteff * dT0_dVg + T0;
dfgche1_dVd = Vgsteff * dT0_dVd;
dfgche1_dVb = Vgsteff * dT0_dVb;
T9 = Vdseff / EsatL;
fgche2 = 1.0 + T9;
dfgche2_dVg = (dVdseff_dVg - T9 * dEsatL_dVg) / EsatL;
dfgche2_dVd = (dVdseff_dVd - T9 * dEsatL_dVd) / EsatL;
dfgche2_dVb = (dVdseff_dVb - T9 * dEsatL_dVb) / EsatL;
gche = beta * fgche1 / fgche2;
dgche_dVg = (beta * dfgche1_dVg + fgche1 * dbeta_dVg
- gche * dfgche2_dVg) / fgche2;
dgche_dVd = (beta * dfgche1_dVd + fgche1 * dbeta_dVd
- gche * dfgche2_dVd) / fgche2;
dgche_dVb = (beta * dfgche1_dVb + fgche1 * dbeta_dVb
- gche * dfgche2_dVb) / fgche2;
T0 = 1.0 + gche * Rds;
T9 = Vdseff / T0;
Idl = gche * T9;
dIdl_dVg = (gche * dVdseff_dVg + T9 * dgche_dVg) / T0
- Idl * gche / T0 * dRds_dVg ;
dIdl_dVd = (gche * dVdseff_dVd + T9 * dgche_dVd) / T0;
dIdl_dVb = (gche * dVdseff_dVb + T9 * dgche_dVb
- Idl * dRds_dVb * gche) / T0;
T9 = diffVds / Va;
T0 = 1.0 + T9;
Idsa = Idl * T0;
dIdsa_dVg = T0 * dIdl_dVg - Idl * (dVdseff_dVg + T9 * dVa_dVg) / Va;
dIdsa_dVd = T0 * dIdl_dVd + Idl * (1.0 - dVdseff_dVd
- T9 * dVa_dVd) / Va;
dIdsa_dVb = T0 * dIdl_dVb - Idl * (dVdseff_dVb + T9 * dVa_dVb) / Va;
T9 = diffVds / VASCBE;
T0 = 1.0 + T9;
Ids = Idsa * T0;
Gm = T0 * dIdsa_dVg - Idsa * (dVdseff_dVg + T9 * dVASCBE_dVg) / VASCBE;
Gds = T0 * dIdsa_dVd + Idsa * (1.0 - dVdseff_dVd
- T9 * dVASCBE_dVd) / VASCBE;
Gmb = T0 * dIdsa_dVb - Idsa * (dVdseff_dVb
+ T9 * dVASCBE_dVb) / VASCBE;
Gds += Gm * dVgsteff_dVd;
Gmb += Gm * dVgsteff_dVb;
Gm *= dVgsteff_dVg;
Gmb *= dVbseff_dVb;
/* Substrate current begins */
tmp = pParam->BSIM3v32alpha0 + pParam->BSIM3v32alpha1 * Leff;
if ((tmp <= 0.0) || (pParam->BSIM3v32beta0 <= 0.0))
{ Isub = Gbd = Gbb = Gbg = 0.0;
}
else
{ T2 = tmp / Leff;
if (diffVds > pParam->BSIM3v32beta0 / EXP_THRESHOLD)
{ T0 = -pParam->BSIM3v32beta0 / diffVds;
T1 = T2 * diffVds * exp(T0);
T3 = T1 / diffVds * (T0 - 1.0);
dT1_dVg = T3 * dVdseff_dVg;
dT1_dVd = T3 * (dVdseff_dVd - 1.0);
dT1_dVb = T3 * dVdseff_dVb;
}
else
{ T3 = T2 * MIN_EXP;
T1 = T3 * diffVds;
dT1_dVg = -T3 * dVdseff_dVg;
dT1_dVd = T3 * (1.0 - dVdseff_dVd);
dT1_dVb = -T3 * dVdseff_dVb;
}
Isub = T1 * Idsa;
Gbg = T1 * dIdsa_dVg + Idsa * dT1_dVg;
Gbd = T1 * dIdsa_dVd + Idsa * dT1_dVd;
Gbb = T1 * dIdsa_dVb + Idsa * dT1_dVb;
Gbd += Gbg * dVgsteff_dVd;
Gbb += Gbg * dVgsteff_dVb;
Gbg *= dVgsteff_dVg;
Gbb *= dVbseff_dVb; /* bug fixing */
}
cdrain = Ids;
here->BSIM3v32gds = Gds;
here->BSIM3v32gm = Gm;
here->BSIM3v32gmbs = Gmb;
here->BSIM3v32gbbs = Gbb;
here->BSIM3v32gbgs = Gbg;
here->BSIM3v32gbds = Gbd;
here->BSIM3v32csub = Isub;
/* BSIM3v32 thermal noise Qinv calculated from all capMod
* 0, 1, 2 & 3 stored in here->BSIM3v32qinv 1/1998 */
if ((model->BSIM3v32xpart < 0) || (!ChargeComputationNeeded))
{ qgate = qdrn = qsrc = qbulk = 0.0;
here->BSIM3v32cggb = here->BSIM3v32cgsb = here->BSIM3v32cgdb = 0.0;
here->BSIM3v32cdgb = here->BSIM3v32cdsb = here->BSIM3v32cddb = 0.0;
here->BSIM3v32cbgb = here->BSIM3v32cbsb = here->BSIM3v32cbdb = 0.0;
here->BSIM3v32cqdb = here->BSIM3v32cqsb = here->BSIM3v32cqgb
= here->BSIM3v32cqbb = 0.0;
here->BSIM3v32gtau = 0.0;
goto finished;
}
else if (model->BSIM3v32capMod == 0)
{
if (Vbseff < 0.0)
{ Vbseff = Vbs;
dVbseff_dVb = 1.0;
}
else
{ Vbseff = pParam->BSIM3v32phi - Phis;
dVbseff_dVb = -dPhis_dVb;
}
Vfb = pParam->BSIM3v32vfbcv;
Vth = Vfb + pParam->BSIM3v32phi + pParam->BSIM3v32k1ox * sqrtPhis;
Vgst = Vgs_eff - Vth;
dVth_dVb = pParam->BSIM3v32k1ox * dsqrtPhis_dVb;
dVgst_dVb = -dVth_dVb;
dVgst_dVg = dVgs_eff_dVg;
CoxWL = model->BSIM3v32cox * pParam->BSIM3v32weffCV
* pParam->BSIM3v32leffCV;
Arg1 = Vgs_eff - Vbseff - Vfb;
if (Arg1 <= 0.0)
{ qgate = CoxWL * Arg1;
qbulk = -qgate;
qdrn = 0.0;
here->BSIM3v32cggb = CoxWL * dVgs_eff_dVg;
here->BSIM3v32cgdb = 0.0;
here->BSIM3v32cgsb = CoxWL * (dVbseff_dVb - dVgs_eff_dVg);
here->BSIM3v32cdgb = 0.0;
here->BSIM3v32cddb = 0.0;
here->BSIM3v32cdsb = 0.0;
here->BSIM3v32cbgb = -CoxWL * dVgs_eff_dVg;
here->BSIM3v32cbdb = 0.0;
here->BSIM3v32cbsb = -here->BSIM3v32cgsb;
here->BSIM3v32qinv = 0.0;
}
else if (Vgst <= 0.0)
{ T1 = 0.5 * pParam->BSIM3v32k1ox;
T2 = sqrt(T1 * T1 + Arg1);
qgate = CoxWL * pParam->BSIM3v32k1ox * (T2 - T1);
qbulk = -qgate;
qdrn = 0.0;
T0 = CoxWL * T1 / T2;
here->BSIM3v32cggb = T0 * dVgs_eff_dVg;
here->BSIM3v32cgdb = 0.0;
here->BSIM3v32cgsb = T0 * (dVbseff_dVb - dVgs_eff_dVg);
here->BSIM3v32cdgb = 0.0;
here->BSIM3v32cddb = 0.0;
here->BSIM3v32cdsb = 0.0;
here->BSIM3v32cbgb = -here->BSIM3v32cggb;
here->BSIM3v32cbdb = 0.0;
here->BSIM3v32cbsb = -here->BSIM3v32cgsb;
here->BSIM3v32qinv = 0.0;
}
else
{ One_Third_CoxWL = CoxWL / 3.0;
Two_Third_CoxWL = 2.0 * One_Third_CoxWL;
AbulkCV = Abulk0 * pParam->BSIM3v32abulkCVfactor;
dAbulkCV_dVb = pParam->BSIM3v32abulkCVfactor * dAbulk0_dVb;
Vdsat = Vgst / AbulkCV;
dVdsat_dVg = dVgs_eff_dVg / AbulkCV;
dVdsat_dVb = - (Vdsat * dAbulkCV_dVb + dVth_dVb)/ AbulkCV;
if (model->BSIM3v32xpart > 0.5)
{ /* 0/100 Charge partition model */
if (Vdsat <= Vds)
{ /* saturation region */
T1 = Vdsat / 3.0;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM3v32phi - T1);
T2 = -Two_Third_CoxWL * Vgst;
qbulk = -(qgate + T2);
qdrn = 0.0;
here->BSIM3v32cggb = One_Third_CoxWL * (3.0
- dVdsat_dVg) * dVgs_eff_dVg;
T2 = -One_Third_CoxWL * dVdsat_dVb;
here->BSIM3v32cgsb = -(here->BSIM3v32cggb + T2);
here->BSIM3v32cgdb = 0.0;
here->BSIM3v32cdgb = 0.0;
here->BSIM3v32cddb = 0.0;
here->BSIM3v32cdsb = 0.0;
here->BSIM3v32cbgb = -(here->BSIM3v32cggb
- Two_Third_CoxWL * dVgs_eff_dVg);
T3 = -(T2 + Two_Third_CoxWL * dVth_dVb);
here->BSIM3v32cbsb = -(here->BSIM3v32cbgb + T3);
here->BSIM3v32cbdb = 0.0;
here->BSIM3v32qinv = -(qgate + qbulk);
}
else
{ /* linear region */
Alphaz = Vgst / Vdsat;
T1 = 2.0 * Vdsat - Vds;
T2 = Vds / (3.0 * T1);
T3 = T2 * Vds;
T9 = 0.25 * CoxWL;
T4 = T9 * Alphaz;
T7 = 2.0 * Vds - T1 - 3.0 * T3;
T8 = T3 - T1 - 2.0 * Vds;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM3v32phi - 0.5 * (Vds - T3));
T10 = T4 * T8;
qdrn = T4 * T7;
qbulk = -(qgate + qdrn + T10);
T5 = T3 / T1;
here->BSIM3v32cggb = CoxWL * (1.0 - T5 * dVdsat_dVg)
* dVgs_eff_dVg;
T11 = -CoxWL * T5 * dVdsat_dVb;
here->BSIM3v32cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5);
here->BSIM3v32cgsb = -(here->BSIM3v32cggb + T11
+ here->BSIM3v32cgdb);
T6 = 1.0 / Vdsat;
dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg);
dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb);
T7 = T9 * T7;
T8 = T9 * T8;
T9 = 2.0 * T4 * (1.0 - 3.0 * T5);
here->BSIM3v32cdgb = (T7 * dAlphaz_dVg - T9
* dVdsat_dVg) * dVgs_eff_dVg;
T12 = T7 * dAlphaz_dVb - T9 * dVdsat_dVb;
here->BSIM3v32cddb = T4 * (3.0 - 6.0 * T2 - 3.0 * T5);
here->BSIM3v32cdsb = -(here->BSIM3v32cdgb + T12
+ here->BSIM3v32cddb);
T9 = 2.0 * T4 * (1.0 + T5);
T10 = (T8 * dAlphaz_dVg - T9 * dVdsat_dVg)
* dVgs_eff_dVg;
T11 = T8 * dAlphaz_dVb - T9 * dVdsat_dVb;
T12 = T4 * (2.0 * T2 + T5 - 1.0);
T0 = -(T10 + T11 + T12);
here->BSIM3v32cbgb = -(here->BSIM3v32cggb
+ here->BSIM3v32cdgb + T10);
here->BSIM3v32cbdb = -(here->BSIM3v32cgdb
+ here->BSIM3v32cddb + T12);
here->BSIM3v32cbsb = -(here->BSIM3v32cgsb
+ here->BSIM3v32cdsb + T0);
here->BSIM3v32qinv = -(qgate + qbulk);
}
}
else if (model->BSIM3v32xpart < 0.5)
{ /* 40/60 Charge partition model */
if (Vds >= Vdsat)
{ /* saturation region */
T1 = Vdsat / 3.0;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM3v32phi - T1);
T2 = -Two_Third_CoxWL * Vgst;
qbulk = -(qgate + T2);
qdrn = 0.4 * T2;
here->BSIM3v32cggb = One_Third_CoxWL * (3.0
- dVdsat_dVg) * dVgs_eff_dVg;
T2 = -One_Third_CoxWL * dVdsat_dVb;
here->BSIM3v32cgsb = -(here->BSIM3v32cggb + T2);
here->BSIM3v32cgdb = 0.0;
T3 = 0.4 * Two_Third_CoxWL;
here->BSIM3v32cdgb = -T3 * dVgs_eff_dVg;
here->BSIM3v32cddb = 0.0;
T4 = T3 * dVth_dVb;
here->BSIM3v32cdsb = -(T4 + here->BSIM3v32cdgb);
here->BSIM3v32cbgb = -(here->BSIM3v32cggb
- Two_Third_CoxWL * dVgs_eff_dVg);
T3 = -(T2 + Two_Third_CoxWL * dVth_dVb);
here->BSIM3v32cbsb = -(here->BSIM3v32cbgb + T3);
here->BSIM3v32cbdb = 0.0;
here->BSIM3v32qinv = -(qgate + qbulk);
}
else
{ /* linear region */
Alphaz = Vgst / Vdsat;
T1 = 2.0 * Vdsat - Vds;
T2 = Vds / (3.0 * T1);
T3 = T2 * Vds;
T9 = 0.25 * CoxWL;
T4 = T9 * Alphaz;
qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3v32phi
- 0.5 * (Vds - T3));
T5 = T3 / T1;
here->BSIM3v32cggb = CoxWL * (1.0 - T5 * dVdsat_dVg)
* dVgs_eff_dVg;
tmp = -CoxWL * T5 * dVdsat_dVb;
here->BSIM3v32cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5);
here->BSIM3v32cgsb = -(here->BSIM3v32cggb
+ here->BSIM3v32cgdb + tmp);
T6 = 1.0 / Vdsat;
dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg);
dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb);
T6 = 8.0 * Vdsat * Vdsat - 6.0 * Vdsat * Vds
+ 1.2 * Vds * Vds;
T8 = T2 / T1;
T7 = Vds - T1 - T8 * T6;
qdrn = T4 * T7;
T7 *= T9;
tmp = T8 / T1;
tmp1 = T4 * (2.0 - 4.0 * tmp * T6
+ T8 * (16.0 * Vdsat - 6.0 * Vds));
here->BSIM3v32cdgb = (T7 * dAlphaz_dVg - tmp1
* dVdsat_dVg) * dVgs_eff_dVg;
T10 = T7 * dAlphaz_dVb - tmp1 * dVdsat_dVb;
here->BSIM3v32cddb = T4 * (2.0 - (1.0 / (3.0 * T1
* T1) + 2.0 * tmp) * T6 + T8
* (6.0 * Vdsat - 2.4 * Vds));
here->BSIM3v32cdsb = -(here->BSIM3v32cdgb
+ T10 + here->BSIM3v32cddb);
T7 = 2.0 * (T1 + T3);
qbulk = -(qgate - T4 * T7);
T7 *= T9;
T0 = 4.0 * T4 * (1.0 - T5);
T12 = (-T7 * dAlphaz_dVg - here->BSIM3v32cdgb
- T0 * dVdsat_dVg) * dVgs_eff_dVg;
T11 = -T7 * dAlphaz_dVb - T10 - T0 * dVdsat_dVb;
T10 = -4.0 * T4 * (T2 - 0.5 + 0.5 * T5)
- here->BSIM3v32cddb;
tmp = -(T10 + T11 + T12);
here->BSIM3v32cbgb = -(here->BSIM3v32cggb
+ here->BSIM3v32cdgb + T12);
here->BSIM3v32cbdb = -(here->BSIM3v32cgdb
+ here->BSIM3v32cddb + T10); /* bug fix */
here->BSIM3v32cbsb = -(here->BSIM3v32cgsb
+ here->BSIM3v32cdsb + tmp);
here->BSIM3v32qinv = -(qgate + qbulk);
}
}
else
{ /* 50/50 partitioning */
if (Vds >= Vdsat)
{ /* saturation region */
T1 = Vdsat / 3.0;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM3v32phi - T1);
T2 = -Two_Third_CoxWL * Vgst;
qbulk = -(qgate + T2);
qdrn = 0.5 * T2;
here->BSIM3v32cggb = One_Third_CoxWL * (3.0
- dVdsat_dVg) * dVgs_eff_dVg;
T2 = -One_Third_CoxWL * dVdsat_dVb;
here->BSIM3v32cgsb = -(here->BSIM3v32cggb + T2);
here->BSIM3v32cgdb = 0.0;
here->BSIM3v32cdgb = -One_Third_CoxWL * dVgs_eff_dVg;
here->BSIM3v32cddb = 0.0;
T4 = One_Third_CoxWL * dVth_dVb;
here->BSIM3v32cdsb = -(T4 + here->BSIM3v32cdgb);
here->BSIM3v32cbgb = -(here->BSIM3v32cggb
- Two_Third_CoxWL * dVgs_eff_dVg);
T3 = -(T2 + Two_Third_CoxWL * dVth_dVb);
here->BSIM3v32cbsb = -(here->BSIM3v32cbgb + T3);
here->BSIM3v32cbdb = 0.0;
here->BSIM3v32qinv = -(qgate + qbulk);
}
else
{ /* linear region */
Alphaz = Vgst / Vdsat;
T1 = 2.0 * Vdsat - Vds;
T2 = Vds / (3.0 * T1);
T3 = T2 * Vds;
T9 = 0.25 * CoxWL;
T4 = T9 * Alphaz;
qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM3v32phi
- 0.5 * (Vds - T3));
T5 = T3 / T1;
here->BSIM3v32cggb = CoxWL * (1.0 - T5 * dVdsat_dVg)
* dVgs_eff_dVg;
tmp = -CoxWL * T5 * dVdsat_dVb;
here->BSIM3v32cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5);
here->BSIM3v32cgsb = -(here->BSIM3v32cggb
+ here->BSIM3v32cgdb + tmp);
T6 = 1.0 / Vdsat;
dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg);
dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb);
T7 = T1 + T3;
qdrn = -T4 * T7;
qbulk = - (qgate + qdrn + qdrn);
T7 *= T9;
T0 = T4 * (2.0 * T5 - 2.0);
here->BSIM3v32cdgb = (T0 * dVdsat_dVg - T7
* dAlphaz_dVg) * dVgs_eff_dVg;
T12 = T0 * dVdsat_dVb - T7 * dAlphaz_dVb;
here->BSIM3v32cddb = T4 * (1.0 - 2.0 * T2 - T5);
here->BSIM3v32cdsb = -(here->BSIM3v32cdgb + T12
+ here->BSIM3v32cddb);
here->BSIM3v32cbgb = -(here->BSIM3v32cggb
+ 2.0 * here->BSIM3v32cdgb);
here->BSIM3v32cbdb = -(here->BSIM3v32cgdb
+ 2.0 * here->BSIM3v32cddb);
here->BSIM3v32cbsb = -(here->BSIM3v32cgsb
+ 2.0 * here->BSIM3v32cdsb);
here->BSIM3v32qinv = -(qgate + qbulk);
}
}
}
}
else
{ if (Vbseff < 0.0)
{ VbseffCV = Vbseff;
dVbseffCV_dVb = 1.0;
}
else
{ VbseffCV = pParam->BSIM3v32phi - Phis;
dVbseffCV_dVb = -dPhis_dVb;
}
CoxWL = model->BSIM3v32cox * pParam->BSIM3v32weffCV
* pParam->BSIM3v32leffCV;
/* Seperate VgsteffCV with noff and voffcv */
noff = n * pParam->BSIM3v32noff;
dnoff_dVd = pParam->BSIM3v32noff * dn_dVd;
dnoff_dVb = pParam->BSIM3v32noff * dn_dVb;
T0 = Vtm * noff;
voffcv = pParam->BSIM3v32voffcv;
VgstNVt = (Vgst - voffcv) / T0;
if (VgstNVt > EXP_THRESHOLD)
{ Vgsteff = Vgst - voffcv;
dVgsteff_dVg = dVgs_eff_dVg;
dVgsteff_dVd = -dVth_dVd;
dVgsteff_dVb = -dVth_dVb;
}
else if (VgstNVt < -EXP_THRESHOLD)
{ Vgsteff = T0 * log(1.0 + MIN_EXP);
dVgsteff_dVg = 0.0;
dVgsteff_dVd = Vgsteff / noff;
dVgsteff_dVb = dVgsteff_dVd * dnoff_dVb;
dVgsteff_dVd *= dnoff_dVd;
}
else
{ ExpVgst = exp(VgstNVt);
Vgsteff = T0 * log(1.0 + ExpVgst);
dVgsteff_dVg = ExpVgst / (1.0 + ExpVgst);
dVgsteff_dVd = -dVgsteff_dVg * (dVth_dVd + (Vgst - voffcv)
/ noff * dnoff_dVd) + Vgsteff / noff * dnoff_dVd;
dVgsteff_dVb = -dVgsteff_dVg * (dVth_dVb + (Vgst - voffcv)
/ noff * dnoff_dVb) + Vgsteff / noff * dnoff_dVb;
dVgsteff_dVg *= dVgs_eff_dVg;
} /* End of VgsteffCV */
if (model->BSIM3v32capMod == 1)
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
Vfb = here->BSIM3v32vfbzb;
break;
case BSIM3v32V32:
Vfb = here->BSIM3v32vfbzb;
dVfb_dVb = dVfb_dVd = 0.0;
break;
default:
Vfb = Vth - pParam->BSIM3v32phi - pParam->BSIM3v32k1ox * sqrtPhis;
dVfb_dVb = dVth_dVb - pParam->BSIM3v32k1ox * dsqrtPhis_dVb;
dVfb_dVd = dVth_dVd;
}
Arg1 = Vgs_eff - VbseffCV - Vfb - Vgsteff;
if (Arg1 <= 0.0)
{ qgate = CoxWL * Arg1;
Cgg = CoxWL * (dVgs_eff_dVg - dVgsteff_dVg);
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
Cgd = -CoxWL * dVgsteff_dVd;
Cgb = -CoxWL * (dVbseffCV_dVb + dVgsteff_dVb);
break;
case BSIM3v32V32:
default:
Cgd = -CoxWL * (dVfb_dVd + dVgsteff_dVd);
Cgb = -CoxWL * (dVfb_dVb + dVbseffCV_dVb + dVgsteff_dVb);
}
}
else
{ T0 = 0.5 * pParam->BSIM3v32k1ox;
T1 = sqrt(T0 * T0 + Arg1);
T2 = CoxWL * T0 / T1;
qgate = CoxWL * pParam->BSIM3v32k1ox * (T1 - T0);
Cgg = T2 * (dVgs_eff_dVg - dVgsteff_dVg);
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
Cgd = -T2 * dVgsteff_dVd;
Cgb = -T2 * (dVbseffCV_dVb + dVgsteff_dVb);
break;
case BSIM3v32V32:
default:
Cgd = -T2 * (dVfb_dVd + dVgsteff_dVd);
Cgb = -T2 * (dVfb_dVb + dVbseffCV_dVb + dVgsteff_dVb);
}
}
qbulk = -qgate;
Cbg = -Cgg;
Cbd = -Cgd;
Cbb = -Cgb;
One_Third_CoxWL = CoxWL / 3.0;
Two_Third_CoxWL = 2.0 * One_Third_CoxWL;
AbulkCV = Abulk0 * pParam->BSIM3v32abulkCVfactor;
dAbulkCV_dVb = pParam->BSIM3v32abulkCVfactor * dAbulk0_dVb;
VdsatCV = Vgsteff / AbulkCV;
if (VdsatCV < Vds)
{ dVdsatCV_dVg = 1.0 / AbulkCV;
dVdsatCV_dVb = -VdsatCV * dAbulkCV_dVb / AbulkCV;
T0 = Vgsteff - VdsatCV / 3.0;
dT0_dVg = 1.0 - dVdsatCV_dVg / 3.0;
dT0_dVb = -dVdsatCV_dVb / 3.0;
qgate += CoxWL * T0;
Cgg1 = CoxWL * dT0_dVg;
Cgb1 = CoxWL * dT0_dVb + Cgg1 * dVgsteff_dVb;
Cgd1 = Cgg1 * dVgsteff_dVd;
Cgg1 *= dVgsteff_dVg;
Cgg += Cgg1;
Cgb += Cgb1;
Cgd += Cgd1;
T0 = VdsatCV - Vgsteff;
dT0_dVg = dVdsatCV_dVg - 1.0;
dT0_dVb = dVdsatCV_dVb;
qbulk += One_Third_CoxWL * T0;
Cbg1 = One_Third_CoxWL * dT0_dVg;
Cbb1 = One_Third_CoxWL * dT0_dVb + Cbg1 * dVgsteff_dVb;
Cbd1 = Cbg1 * dVgsteff_dVd;
Cbg1 *= dVgsteff_dVg;
Cbg += Cbg1;
Cbb += Cbb1;
Cbd += Cbd1;
if (model->BSIM3v32xpart > 0.5)
T0 = -Two_Third_CoxWL;
else if (model->BSIM3v32xpart < 0.5)
T0 = -0.4 * CoxWL;
else
T0 = -One_Third_CoxWL;
qsrc = T0 * Vgsteff;
Csg = T0 * dVgsteff_dVg;
Csb = T0 * dVgsteff_dVb;
Csd = T0 * dVgsteff_dVd;
Cgb *= dVbseff_dVb;
Cbb *= dVbseff_dVb;
Csb *= dVbseff_dVb;
}
else
{ T0 = AbulkCV * Vds;
T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1.e-20);
T2 = Vds / T1;
T3 = T0 * T2;
dT3_dVg = -12.0 * T2 * T2 * AbulkCV;
dT3_dVd = 6.0 * T0 * (4.0 * Vgsteff - T0) / T1 / T1 - 0.5;
dT3_dVb = 12.0 * T2 * T2 * dAbulkCV_dVb * Vgsteff;
qgate += CoxWL * (Vgsteff - 0.5 * Vds + T3);
Cgg1 = CoxWL * (1.0 + dT3_dVg);
Cgb1 = CoxWL * dT3_dVb + Cgg1 * dVgsteff_dVb;
Cgd1 = CoxWL * dT3_dVd + Cgg1 * dVgsteff_dVd;
Cgg1 *= dVgsteff_dVg;
Cgg += Cgg1;
Cgb += Cgb1;
Cgd += Cgd1;
qbulk += CoxWL * (1.0 - AbulkCV) * (0.5 * Vds - T3);
Cbg1 = -CoxWL * ((1.0 - AbulkCV) * dT3_dVg);
Cbb1 = -CoxWL * ((1.0 - AbulkCV) * dT3_dVb
+ (0.5 * Vds - T3) * dAbulkCV_dVb)
+ Cbg1 * dVgsteff_dVb;
Cbd1 = -CoxWL * (1.0 - AbulkCV) * dT3_dVd
+ Cbg1 * dVgsteff_dVd;
Cbg1 *= dVgsteff_dVg;
Cbg += Cbg1;
Cbb += Cbb1;
Cbd += Cbd1;
if (model->BSIM3v32xpart > 0.5)
{ /* 0/100 Charge petition model */
T1 = T1 + T1;
qsrc = -CoxWL * (0.5 * Vgsteff + 0.25 * T0
- T0 * T0 / T1);
Csg = -CoxWL * (0.5 + 24.0 * T0 * Vds / T1 / T1
* AbulkCV);
Csb = -CoxWL * (0.25 * Vds * dAbulkCV_dVb
- 12.0 * T0 * Vds / T1 / T1 * (4.0 * Vgsteff - T0)
* dAbulkCV_dVb) + Csg * dVgsteff_dVb;
Csd = -CoxWL * (0.25 * AbulkCV - 12.0 * AbulkCV * T0
/ T1 / T1 * (4.0 * Vgsteff - T0))
+ Csg * dVgsteff_dVd;
Csg *= dVgsteff_dVg;
}
else if (model->BSIM3v32xpart < 0.5)
{ /* 40/60 Charge petition model */
T1 = T1 / 12.0;
T2 = 0.5 * CoxWL / (T1 * T1);
T3 = Vgsteff * (2.0 * T0 * T0 / 3.0 + Vgsteff
* (Vgsteff - 4.0 * T0 / 3.0))
- 2.0 * T0 * T0 * T0 / 15.0;
qsrc = -T2 * T3;
T4 = 4.0 / 3.0 * Vgsteff * (Vgsteff - T0)
+ 0.4 * T0 * T0;
Csg = -2.0 * qsrc / T1 - T2 * (Vgsteff * (3.0
* Vgsteff - 8.0 * T0 / 3.0)
+ 2.0 * T0 * T0 / 3.0);
Csb = (qsrc / T1 * Vds + T2 * T4 * Vds) * dAbulkCV_dVb
+ Csg * dVgsteff_dVb;
Csd = (qsrc / T1 + T2 * T4) * AbulkCV
+ Csg * dVgsteff_dVd;
Csg *= dVgsteff_dVg;
}
else
{ /* 50/50 Charge petition model */
qsrc = -0.5 * (qgate + qbulk);
Csg = -0.5 * (Cgg1 + Cbg1);
Csb = -0.5 * (Cgb1 + Cbb1);
Csd = -0.5 * (Cgd1 + Cbd1);
}
Cgb *= dVbseff_dVb;
Cbb *= dVbseff_dVb;
Csb *= dVbseff_dVb;
}
qdrn = -(qgate + qbulk + qsrc);
here->BSIM3v32cggb = Cgg;
here->BSIM3v32cgsb = -(Cgg + Cgd + Cgb);
here->BSIM3v32cgdb = Cgd;
here->BSIM3v32cdgb = -(Cgg + Cbg + Csg);
here->BSIM3v32cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb
+ Csg + Csd + Csb);
here->BSIM3v32cddb = -(Cgd + Cbd + Csd);
here->BSIM3v32cbgb = Cbg;
here->BSIM3v32cbsb = -(Cbg + Cbd + Cbb);
here->BSIM3v32cbdb = Cbd;
here->BSIM3v32qinv = -(qgate + qbulk);
}
else if (model->BSIM3v32capMod == 2)
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
Vfb = here->BSIM3v32vfbzb;
break;
case BSIM3v32V32:
Vfb = here->BSIM3v32vfbzb;
dVfb_dVb = dVfb_dVd = 0.0;
break;
default: /* old code prior to v3.2 */
Vfb = Vth - pParam->BSIM3v32phi - pParam->BSIM3v32k1ox * sqrtPhis;
dVfb_dVb = dVth_dVb - pParam->BSIM3v32k1ox * dsqrtPhis_dVb;
dVfb_dVd = dVth_dVd;
}
V3 = Vfb - Vgs_eff + VbseffCV - DELTA_3;
if (Vfb <= 0.0)
{ T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * Vfb);
T2 = -DELTA_3 / T0;
}
else
{ T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * Vfb);
T2 = DELTA_3 / T0;
}
T1 = 0.5 * (1.0 + V3 / T0);
Vfbeff = Vfb - 0.5 * (V3 + T0);
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
/* Do nothing */
break;
case BSIM3v32V32:
default:
dVfbeff_dVd = (1.0 - T1 - T2) * dVfb_dVd;
}
dVfbeff_dVg = T1 * dVgs_eff_dVg;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
dVfbeff_dVb = -T1 * dVbseffCV_dVb;
break;
case BSIM3v32V32:
default:
dVfbeff_dVb = (1.0 - T1 - T2) * dVfb_dVb - T1 * dVbseffCV_dVb;
}
Qac0 = CoxWL * (Vfbeff - Vfb);
dQac0_dVg = CoxWL * dVfbeff_dVg;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
/* Do nothing */
break;
case BSIM3v32V32:
default:
dQac0_dVd = CoxWL * (dVfbeff_dVd - dVfb_dVd);
}
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
dQac0_dVb = CoxWL * dVfbeff_dVb;
break;
case BSIM3v32V32:
default:
dQac0_dVb = CoxWL * (dVfbeff_dVb - dVfb_dVb);
}
T0 = 0.5 * pParam->BSIM3v32k1ox;
T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff;
if (pParam->BSIM3v32k1ox == 0.0)
{ T1 = 0.0;
T2 = 0.0;
}
else if (T3 < 0.0)
{ T1 = T0 + T3 / pParam->BSIM3v32k1ox;
T2 = CoxWL;
}
else
{ T1 = sqrt(T0 * T0 + T3);
T2 = CoxWL * T0 / T1;
}
Qsub0 = CoxWL * pParam->BSIM3v32k1ox * (T1 - T0);
dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg);
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
dQsub0_dVd = -T2 * dVgsteff_dVd;
break;
case BSIM3v32V32:
default:
dQsub0_dVd = -T2 * (dVfbeff_dVd + dVgsteff_dVd);
}
dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb
+ dVgsteff_dVb);
AbulkCV = Abulk0 * pParam->BSIM3v32abulkCVfactor;
dAbulkCV_dVb = pParam->BSIM3v32abulkCVfactor * dAbulk0_dVb;
VdsatCV = Vgsteff / AbulkCV;
V4 = VdsatCV - Vds - DELTA_4;
T0 = sqrt(V4 * V4 + 4.0 * DELTA_4 * VdsatCV);
VdseffCV = VdsatCV - 0.5 * (V4 + T0);
T1 = 0.5 * (1.0 + V4 / T0);
T2 = DELTA_4 / T0;
T3 = (1.0 - T1 - T2) / AbulkCV;
dVdseffCV_dVg = T3;
dVdseffCV_dVd = T1;
dVdseffCV_dVb = -T3 * VdsatCV * dAbulkCV_dVb;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
/* Added to eliminate non-zero VdseffCV at Vds=0.0 */
if (Vds == 0.0)
{
VdseffCV = 0.0;
dVdseffCV_dVg = 0.0;
dVdseffCV_dVb = 0.0;
}
break;
case BSIM3v32V32:
default:
/* Do nothing */
break;
}
T0 = AbulkCV * VdseffCV;
T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1e-20);
T2 = VdseffCV / T1;
T3 = T0 * T2;
T4 = (1.0 - 12.0 * T2 * T2 * AbulkCV);
T5 = (6.0 * T0 * (4.0 * Vgsteff - T0) / (T1 * T1) - 0.5);
T6 = 12.0 * T2 * T2 * Vgsteff;
qinoi = -CoxWL * (Vgsteff - 0.5 * T0 + AbulkCV * T3);
qgate = CoxWL * (Vgsteff - 0.5 * VdseffCV + T3);
Cgg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg);
Cgd1 = CoxWL * T5 * dVdseffCV_dVd + Cgg1 * dVgsteff_dVd;
Cgb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Cgg1 * dVgsteff_dVb;
Cgg1 *= dVgsteff_dVg;
T7 = 1.0 - AbulkCV;
qbulk = CoxWL * T7 * (0.5 * VdseffCV - T3);
T4 = -T7 * (T4 - 1.0);
T5 = -T7 * T5;
T6 = -(T7 * T6 + (0.5 * VdseffCV - T3));
Cbg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg);
Cbd1 = CoxWL * T5 * dVdseffCV_dVd + Cbg1 * dVgsteff_dVd;
Cbb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Cbg1 * dVgsteff_dVb;
Cbg1 *= dVgsteff_dVg;
if (model->BSIM3v32xpart > 0.5)
{ /* 0/100 Charge partition model */
T1 = T1 + T1;
qsrc = -CoxWL * (0.5 * Vgsteff + 0.25 * T0
- T0 * T0 / T1);
T7 = (4.0 * Vgsteff - T0) / (T1 * T1);
T4 = -(0.5 + 24.0 * T0 * T0 / (T1 * T1));
T5 = -(0.25 * AbulkCV - 12.0 * AbulkCV * T0 * T7);
T6 = -(0.25 * VdseffCV - 12.0 * T0 * VdseffCV * T7);
Csg = CoxWL * (T4 + T5 * dVdseffCV_dVg);
Csd = CoxWL * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd;
Csb = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Csg * dVgsteff_dVb;
Csg *= dVgsteff_dVg;
}
else if (model->BSIM3v32xpart < 0.5)
{ /* 40/60 Charge partition model */
T1 = T1 / 12.0;
T2 = 0.5 * CoxWL / (T1 * T1);
T3 = Vgsteff * (2.0 * T0 * T0 / 3.0 + Vgsteff
* (Vgsteff - 4.0 * T0 / 3.0))
- 2.0 * T0 * T0 * T0 / 15.0;
qsrc = -T2 * T3;
T7 = 4.0 / 3.0 * Vgsteff * (Vgsteff - T0)
+ 0.4 * T0 * T0;
T4 = -2.0 * qsrc / T1 - T2 * (Vgsteff * (3.0
* Vgsteff - 8.0 * T0 / 3.0)
+ 2.0 * T0 * T0 / 3.0);
T5 = (qsrc / T1 + T2 * T7) * AbulkCV;
T6 = (qsrc / T1 * VdseffCV + T2 * T7 * VdseffCV);
Csg = (T4 + T5 * dVdseffCV_dVg);
Csd = T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd;
Csb = (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Csg * dVgsteff_dVb;
Csg *= dVgsteff_dVg;
}
else
{ /* 50/50 Charge partition model */
qsrc = -0.5 * (qgate + qbulk);
Csg = -0.5 * (Cgg1 + Cbg1);
Csb = -0.5 * (Cgb1 + Cbb1);
Csd = -0.5 * (Cgd1 + Cbd1);
}
qgate += Qac0 + Qsub0;
qbulk -= (Qac0 + Qsub0);
qdrn = -(qgate + qbulk + qsrc);
Cgg = dQac0_dVg + dQsub0_dVg + Cgg1;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
Cgd = dQsub0_dVd + Cgd1;
break;
case BSIM3v32V32:
default:
Cgd = dQac0_dVd + dQsub0_dVd + Cgd1;
}
Cgb = dQac0_dVb + dQsub0_dVb + Cgb1;
Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
Cbd = Cbd1 - dQsub0_dVd;
break;
case BSIM3v32V32:
default:
Cbd = Cbd1 - dQac0_dVd - dQsub0_dVd;
}
Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb;
Cgb *= dVbseff_dVb;
Cbb *= dVbseff_dVb;
Csb *= dVbseff_dVb;
here->BSIM3v32cggb = Cgg;
here->BSIM3v32cgsb = -(Cgg + Cgd + Cgb);
here->BSIM3v32cgdb = Cgd;
here->BSIM3v32cdgb = -(Cgg + Cbg + Csg);
here->BSIM3v32cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb
+ Csg + Csd + Csb);
here->BSIM3v32cddb = -(Cgd + Cbd + Csd);
here->BSIM3v32cbgb = Cbg;
here->BSIM3v32cbsb = -(Cbg + Cbd + Cbb);
here->BSIM3v32cbdb = Cbd;
here->BSIM3v32qinv = qinoi;
}
/* New Charge-Thickness capMod (CTM) begins */
else if (model->BSIM3v32capMod == 3)
{ V3 = here->BSIM3v32vfbzb - Vgs_eff + VbseffCV - DELTA_3;
if (here->BSIM3v32vfbzb <= 0.0)
{ T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * here->BSIM3v32vfbzb);
T2 = -DELTA_3 / T0;
}
else
{ T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * here->BSIM3v32vfbzb);
T2 = DELTA_3 / T0;
}
T1 = 0.5 * (1.0 + V3 / T0);
Vfbeff = here->BSIM3v32vfbzb - 0.5 * (V3 + T0);
dVfbeff_dVg = T1 * dVgs_eff_dVg;
dVfbeff_dVb = -T1 * dVbseffCV_dVb;
Cox = model->BSIM3v32cox;
Tox = 1.0e8 * model->BSIM3v32tox;
T0 = (Vgs_eff - VbseffCV - here->BSIM3v32vfbzb) / Tox;
dT0_dVg = dVgs_eff_dVg / Tox;
dT0_dVb = -dVbseffCV_dVb / Tox;
tmp = T0 * pParam->BSIM3v32acde;
if ((-EXP_THRESHOLD < tmp) && (tmp < EXP_THRESHOLD))
{ Tcen = pParam->BSIM3v32ldeb * exp(tmp);
dTcen_dVg = pParam->BSIM3v32acde * Tcen;
dTcen_dVb = dTcen_dVg * dT0_dVb;
dTcen_dVg *= dT0_dVg;
}
else if (tmp <= -EXP_THRESHOLD)
{ Tcen = pParam->BSIM3v32ldeb * MIN_EXP;
dTcen_dVg = dTcen_dVb = 0.0;
}
else
{ Tcen = pParam->BSIM3v32ldeb * MAX_EXP;
dTcen_dVg = dTcen_dVb = 0.0;
}
LINK = 1.0e-3 * model->BSIM3v32tox;
V3 = pParam->BSIM3v32ldeb - Tcen - LINK;
V4 = sqrt(V3 * V3 + 4.0 * LINK * pParam->BSIM3v32ldeb);
Tcen = pParam->BSIM3v32ldeb - 0.5 * (V3 + V4);
T1 = 0.5 * (1.0 + V3 / V4);
dTcen_dVg *= T1;
dTcen_dVb *= T1;
Ccen = EPSSI / Tcen;
T2 = Cox / (Cox + Ccen);
Coxeff = T2 * Ccen;
T3 = -Ccen / Tcen;
dCoxeff_dVg = T2 * T2 * T3;
dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb;
dCoxeff_dVg *= dTcen_dVg;
CoxWLcen = CoxWL * Coxeff / Cox;
Qac0 = CoxWLcen * (Vfbeff - here->BSIM3v32vfbzb);
QovCox = Qac0 / Coxeff;
dQac0_dVg = CoxWLcen * dVfbeff_dVg
+ QovCox * dCoxeff_dVg;
dQac0_dVb = CoxWLcen * dVfbeff_dVb
+ QovCox * dCoxeff_dVb;
T0 = 0.5 * pParam->BSIM3v32k1ox;
T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff;
if (pParam->BSIM3v32k1ox == 0.0)
{ T1 = 0.0;
T2 = 0.0;
}
else if (T3 < 0.0)
{ T1 = T0 + T3 / pParam->BSIM3v32k1ox;
T2 = CoxWLcen;
}
else
{ T1 = sqrt(T0 * T0 + T3);
T2 = CoxWLcen * T0 / T1;
}
Qsub0 = CoxWLcen * pParam->BSIM3v32k1ox * (T1 - T0);
QovCox = Qsub0 / Coxeff;
dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg)
+ QovCox * dCoxeff_dVg;
dQsub0_dVd = -T2 * dVgsteff_dVd;
dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb + dVgsteff_dVb)
+ QovCox * dCoxeff_dVb;
/* Gate-bias dependent delta Phis begins */
if (pParam->BSIM3v32k1ox <= 0.0)
{ Denomi = 0.25 * pParam->BSIM3v32moin * Vtm;
T0 = 0.5 * pParam->BSIM3v32sqrtPhi;
}
else
{ Denomi = pParam->BSIM3v32moin * Vtm
* pParam->BSIM3v32k1ox * pParam->BSIM3v32k1ox;
T0 = pParam->BSIM3v32k1ox * pParam->BSIM3v32sqrtPhi;
}
T1 = 2.0 * T0 + Vgsteff;
DeltaPhi = Vtm * log(1.0 + T1 * Vgsteff / Denomi);
dDeltaPhi_dVg = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff);
dDeltaPhi_dVd = dDeltaPhi_dVg * dVgsteff_dVd;
dDeltaPhi_dVb = dDeltaPhi_dVg * dVgsteff_dVb;
/* End of delta Phis */
T3 = 4.0 * (Vth - here->BSIM3v32vfbzb - pParam->BSIM3v32phi);
Tox += Tox;
if (T3 >= 0.0)
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
T0 = (Vgsteff + T3) / Tox;
dT0_dVd = (dVgsteff_dVd + 4.0 * dVth_dVd) / Tox;
dT0_dVb = (dVgsteff_dVb + 4.0 * dVth_dVb) / Tox;
break;
case BSIM3v32V32:
default:
T0 = (Vgsteff + T3) / Tox;
}
}
else
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
T0 = (Vgsteff + 1.0e-20) / Tox;
dT0_dVd = dVgsteff_dVd / Tox;
dT0_dVb = dVgsteff_dVb / Tox;
break;
case BSIM3v32V32:
default:
T0 = (Vgsteff + 1.0e-20) / Tox;
}
}
tmp = exp(0.7 * log(T0));
T1 = 1.0 + tmp;
T2 = 0.7 * tmp / (T0 * Tox);
Tcen = 1.9e-9 / T1;
dTcen_dVg = -1.9e-9 * T2 / T1 /T1;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
dTcen_dVd = Tox * dTcen_dVg;
dTcen_dVb = dTcen_dVd * dT0_dVb;
dTcen_dVd *= dT0_dVd;
break;
case BSIM3v32V32:
default:
dTcen_dVd = dTcen_dVg * (4.0 * dVth_dVd + dVgsteff_dVd);
dTcen_dVb = dTcen_dVg * (4.0 * dVth_dVb + dVgsteff_dVb);
}
dTcen_dVg *= dVgsteff_dVg;
Ccen = EPSSI / Tcen;
T0 = Cox / (Cox + Ccen);
Coxeff = T0 * Ccen;
T1 = -Ccen / Tcen;
dCoxeff_dVg = T0 * T0 * T1;
dCoxeff_dVd = dCoxeff_dVg * dTcen_dVd;
dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb;
dCoxeff_dVg *= dTcen_dVg;
CoxWLcen = CoxWL * Coxeff / Cox;
AbulkCV = Abulk0 * pParam->BSIM3v32abulkCVfactor;
dAbulkCV_dVb = pParam->BSIM3v32abulkCVfactor * dAbulk0_dVb;
VdsatCV = (Vgsteff - DeltaPhi) / AbulkCV;
V4 = VdsatCV - Vds - DELTA_4;
T0 = sqrt(V4 * V4 + 4.0 * DELTA_4 * VdsatCV);
VdseffCV = VdsatCV - 0.5 * (V4 + T0);
T1 = 0.5 * (1.0 + V4 / T0);
T2 = DELTA_4 / T0;
T3 = (1.0 - T1 - T2) / AbulkCV;
T4 = T3 * ( 1.0 - dDeltaPhi_dVg);
dVdseffCV_dVg = T4;
dVdseffCV_dVd = T1;
dVdseffCV_dVb = -T3 * VdsatCV * dAbulkCV_dVb;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
case BSIM3v32V322:
/* Added to eliminate non-zero VdseffCV at Vds=0.0 */
if (Vds == 0.0)
{
VdseffCV = 0.0;
dVdseffCV_dVg = 0.0;
dVdseffCV_dVb = 0.0;
}
break;
case BSIM3v32V32:
default:
/* Do nothing */
break;
}
T0 = AbulkCV * VdseffCV;
T1 = Vgsteff - DeltaPhi;
T2 = 12.0 * (T1 - 0.5 * T0 + 1.0e-20);
T3 = T0 / T2;
T4 = 1.0 - 12.0 * T3 * T3;
T5 = AbulkCV * (6.0 * T0 * (4.0 * T1 - T0) / (T2 * T2) - 0.5);
T6 = T5 * VdseffCV / AbulkCV;
qgate = qinoi = CoxWLcen * (T1 - T0 * (0.5 - T3));
QovCox = qgate / Coxeff;
Cgg1 = CoxWLcen * (T4 * (1.0 - dDeltaPhi_dVg)
+ T5 * dVdseffCV_dVg);
Cgd1 = CoxWLcen * T5 * dVdseffCV_dVd + Cgg1
* dVgsteff_dVd + QovCox * dCoxeff_dVd;
Cgb1 = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Cgg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb;
Cgg1 = Cgg1 * dVgsteff_dVg + QovCox * dCoxeff_dVg;
T7 = 1.0 - AbulkCV;
T8 = T2 * T2;
T9 = 12.0 * T7 * T0 * T0 / (T8 * AbulkCV);
T10 = T9 * (1.0 - dDeltaPhi_dVg);
T11 = -T7 * T5 / AbulkCV;
T12 = -(T9 * T1 / AbulkCV + VdseffCV * (0.5 - T0 / T2));
qbulk = CoxWLcen * T7 * (0.5 * VdseffCV - T0 * VdseffCV / T2);
QovCox = qbulk / Coxeff;
Cbg1 = CoxWLcen * (T10 + T11 * dVdseffCV_dVg);
Cbd1 = CoxWLcen * T11 * dVdseffCV_dVd + Cbg1
* dVgsteff_dVd + QovCox * dCoxeff_dVd;
Cbb1 = CoxWLcen * (T11 * dVdseffCV_dVb + T12 * dAbulkCV_dVb)
+ Cbg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb;
Cbg1 = Cbg1 * dVgsteff_dVg + QovCox * dCoxeff_dVg;
if (model->BSIM3v32xpart > 0.5)
{ /* 0/100 partition */
qsrc = -CoxWLcen * (T1 / 2.0 + T0 / 4.0
- 0.5 * T0 * T0 / T2);
QovCox = qsrc / Coxeff;
T2 += T2;
T3 = T2 * T2;
T7 = -(0.25 - 12.0 * T0 * (4.0 * T1 - T0) / T3);
T4 = -(0.5 + 24.0 * T0 * T0 / T3) * (1.0 - dDeltaPhi_dVg);
T5 = T7 * AbulkCV;
T6 = T7 * VdseffCV;
Csg = CoxWLcen * (T4 + T5 * dVdseffCV_dVg);
Csd = CoxWLcen * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd
+ QovCox * dCoxeff_dVd;
Csb = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Csg * dVgsteff_dVb + QovCox * dCoxeff_dVb;
Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg;
}
else if (model->BSIM3v32xpart < 0.5)
{ /* 40/60 partition */
T2 = T2 / 12.0;
T3 = 0.5 * CoxWLcen / (T2 * T2);
T4 = T1 * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0
* T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0;
qsrc = -T3 * T4;
QovCox = qsrc / Coxeff;
T8 = 4.0 / 3.0 * T1 * (T1 - T0) + 0.4 * T0 * T0;
T5 = -2.0 * qsrc / T2 - T3 * (T1 * (3.0 * T1 - 8.0
* T0 / 3.0) + 2.0 * T0 * T0 / 3.0);
T6 = AbulkCV * (qsrc / T2 + T3 * T8);
T7 = T6 * VdseffCV / AbulkCV;
Csg = T5 * (1.0 - dDeltaPhi_dVg) + T6 * dVdseffCV_dVg;
Csd = Csg * dVgsteff_dVd + T6 * dVdseffCV_dVd
+ QovCox * dCoxeff_dVd;
Csb = Csg * dVgsteff_dVb + T6 * dVdseffCV_dVb
+ T7 * dAbulkCV_dVb + QovCox * dCoxeff_dVb;
Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg;
}
else
{ /* 50/50 partition */
qsrc = -0.5 * qgate;
Csg = -0.5 * Cgg1;
Csd = -0.5 * Cgd1;
Csb = -0.5 * Cgb1;
}
qgate += Qac0 + Qsub0 - qbulk;
qbulk -= (Qac0 + Qsub0);
qdrn = -(qgate + qbulk + qsrc);
Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg;
Cbd = Cbd1 - dQsub0_dVd;
Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb;
Cgg = Cgg1 - Cbg;
Cgd = Cgd1 - Cbd;
Cgb = Cgb1 - Cbb;
Cgb *= dVbseff_dVb;
Cbb *= dVbseff_dVb;
Csb *= dVbseff_dVb;
here->BSIM3v32cggb = Cgg;
here->BSIM3v32cgsb = -(Cgg + Cgd + Cgb);
here->BSIM3v32cgdb = Cgd;
here->BSIM3v32cdgb = -(Cgg + Cbg + Csg);
here->BSIM3v32cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb
+ Csg + Csd + Csb);
here->BSIM3v32cddb = -(Cgd + Cbd + Csd);
here->BSIM3v32cbgb = Cbg;
here->BSIM3v32cbsb = -(Cbg + Cbd + Cbb);
here->BSIM3v32cbdb = Cbd;
here->BSIM3v32qinv = -qinoi;
} /* End of CTM */
}
finished:
/* Returning Values to Calling Routine */
/*
* COMPUTE EQUIVALENT DRAIN CURRENT SOURCE
*/
here->BSIM3v32qgate = qgate;
here->BSIM3v32qbulk = qbulk;
here->BSIM3v32qdrn = qdrn;
here->BSIM3v32cd = cdrain;
if (ChargeComputationNeeded)
{ /* charge storage elements
* bulk-drain and bulk-source depletion capacitances
* czbd : zero bias drain junction capacitance
* czbs : zero bias source junction capacitance
* czbdsw: zero bias drain junction sidewall capacitance
along field oxide
* czbssw: zero bias source junction sidewall capacitance
along field oxide
* czbdswg: zero bias drain junction sidewall capacitance
along gate side
* czbsswg: zero bias source junction sidewall capacitance
along gate side
*/
if (model->BSIM3v32acmMod == 0)
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
czbd = model->BSIM3v32unitAreaTempJctCap * here->BSIM3v32drainArea; /*bug fix */
czbs = model->BSIM3v32unitAreaTempJctCap * here->BSIM3v32sourceArea;
break;
case BSIM3v32V322:
case BSIM3v32V32:
default:
czbd = model->BSIM3v32unitAreaJctCap * here->BSIM3v32drainArea;
czbs = model->BSIM3v32unitAreaJctCap * here->BSIM3v32sourceArea;
}
if (here->BSIM3v32drainPerimeter < pParam->BSIM3v32weff)
{
czbdsw = 0.0;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
czbdswg = model->BSIM3v32unitLengthGateSidewallTempJctCap
* here->BSIM3v32drainPerimeter;
break;
case BSIM3v32V322:
case BSIM3v32V32:
default:
czbdswg = model->BSIM3v32unitLengthGateSidewallJctCap
* here->BSIM3v32drainPerimeter;
}
}
else
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
czbdsw = model->BSIM3v32unitLengthSidewallTempJctCap
* (here->BSIM3v32drainPerimeter - pParam->BSIM3v32weff);
czbdswg = model->BSIM3v32unitLengthGateSidewallTempJctCap
* pParam->BSIM3v32weff;
break;
case BSIM3v32V322:
case BSIM3v32V32:
default:
czbdsw = model->BSIM3v32unitLengthSidewallJctCap
* (here->BSIM3v32drainPerimeter - pParam->BSIM3v32weff);
czbdswg = model->BSIM3v32unitLengthGateSidewallJctCap
* pParam->BSIM3v32weff;
}
}
if (here->BSIM3v32sourcePerimeter < pParam->BSIM3v32weff)
{
czbssw = 0.0;
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
czbsswg = model->BSIM3v32unitLengthGateSidewallTempJctCap
* here->BSIM3v32sourcePerimeter;
break;
case BSIM3v32V322:
case BSIM3v32V32:
default:
czbsswg = model->BSIM3v32unitLengthGateSidewallJctCap
* here->BSIM3v32sourcePerimeter;
}
}
else
{
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
czbssw = model->BSIM3v32unitLengthSidewallTempJctCap
* (here->BSIM3v32sourcePerimeter - pParam->BSIM3v32weff);
czbsswg = model->BSIM3v32unitLengthGateSidewallTempJctCap
* pParam->BSIM3v32weff;
break;
case BSIM3v32V322:
case BSIM3v32V32:
default:
czbssw = model->BSIM3v32unitLengthSidewallJctCap
* (here->BSIM3v32sourcePerimeter - pParam->BSIM3v32weff);
czbsswg = model->BSIM3v32unitLengthGateSidewallJctCap
* pParam->BSIM3v32weff;
}
}
} else {
/* Added revision dependent code */
switch (model->BSIM3v32intVersion) {
case BSIM3v32V324:
case BSIM3v32V323:
error = ACM_junctionCapacitances(
model->BSIM3v32acmMod,
model->BSIM3v32calcacm,
here->BSIM3v32geo,
model->BSIM3v32hdif,
model->BSIM3v32wmlt,
here->BSIM3v32w,
model->BSIM3v32xw,
here->BSIM3v32drainAreaGiven,
here->BSIM3v32drainArea,
here->BSIM3v32drainPerimeterGiven,
here->BSIM3v32drainPerimeter,
here->BSIM3v32sourceAreaGiven,
here->BSIM3v32sourceArea,
here->BSIM3v32sourcePerimeterGiven,
here->BSIM3v32sourcePerimeter,
model->BSIM3v32unitAreaTempJctCap,
model->BSIM3v32unitLengthSidewallTempJctCap,
model->BSIM3v32unitLengthGateSidewallTempJctCap,
&czbd,
&czbdsw,
&czbdswg,
&czbs,
&czbssw,
&czbsswg
);
break;
case BSIM3v32V322:
case BSIM3v32V32:
default:
error = ACM_junctionCapacitances(
model->BSIM3v32acmMod,
model->BSIM3v32calcacm,
here->BSIM3v32geo,
model->BSIM3v32hdif,
model->BSIM3v32wmlt,
here->BSIM3v32w,
model->BSIM3v32xw,
here->BSIM3v32drainAreaGiven,
here->BSIM3v32drainArea,
here->BSIM3v32drainPerimeterGiven,
here->BSIM3v32drainPerimeter,
here->BSIM3v32sourceAreaGiven,
here->BSIM3v32sourceArea,
here->BSIM3v32sourcePerimeterGiven,
here->BSIM3v32sourcePerimeter,
model->BSIM3v32unitAreaJctCap,
model->BSIM3v32unitLengthSidewallJctCap,
model->BSIM3v32unitLengthGateSidewallJctCap,
&czbd,
&czbdsw,
&czbdswg,
&czbs,
&czbssw,
&czbsswg
);
}
if (error)
return(error);
}
MJ = model->BSIM3v32bulkJctBotGradingCoeff;
MJSW = model->BSIM3v32bulkJctSideGradingCoeff;
MJSWG = model->BSIM3v32bulkJctGateSideGradingCoeff;
/* Source Bulk Junction */
if (vbs == 0.0)
{ *(ckt->CKTstate0 + here->BSIM3v32qbs) = 0.0;
here->BSIM3v32capbs = czbs + czbssw + czbsswg;
}
else if (vbs < 0.0)
{ if (czbs > 0.0)
{ arg = 1.0 - vbs / model->BSIM3v32PhiB;
if (MJ == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJ * log(arg));
*(ckt->CKTstate0 + here->BSIM3v32qbs) = model->BSIM3v32PhiB * czbs
* (1.0 - arg * sarg) / (1.0 - MJ);
here->BSIM3v32capbs = czbs * sarg;
}
else
{ *(ckt->CKTstate0 + here->BSIM3v32qbs) = 0.0;
here->BSIM3v32capbs = 0.0;
}
if (czbssw > 0.0)
{ arg = 1.0 - vbs / model->BSIM3v32PhiBSW;
if (MJSW == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSW * log(arg));
*(ckt->CKTstate0 + here->BSIM3v32qbs) += model->BSIM3v32PhiBSW * czbssw
* (1.0 - arg * sarg) / (1.0 - MJSW);
here->BSIM3v32capbs += czbssw * sarg;
}
if (czbsswg > 0.0)
{ arg = 1.0 - vbs / model->BSIM3v32PhiBSWG;
if (MJSWG == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSWG * log(arg));
*(ckt->CKTstate0 + here->BSIM3v32qbs) += model->BSIM3v32PhiBSWG * czbsswg
* (1.0 - arg * sarg) / (1.0 - MJSWG);
here->BSIM3v32capbs += czbsswg * sarg;
}
}
else
{ T0 = czbs + czbssw + czbsswg;
T1 = vbs * (czbs * MJ / model->BSIM3v32PhiB + czbssw * MJSW
/ model->BSIM3v32PhiBSW + czbsswg * MJSWG / model->BSIM3v32PhiBSWG);
*(ckt->CKTstate0 + here->BSIM3v32qbs) = vbs * (T0 + 0.5 * T1);
here->BSIM3v32capbs = T0 + T1;
}
/* Drain Bulk Junction */
if (vbd == 0.0)
{ *(ckt->CKTstate0 + here->BSIM3v32qbd) = 0.0;
here->BSIM3v32capbd = czbd + czbdsw + czbdswg;
}
else if (vbd < 0.0)
{ if (czbd > 0.0)
{ arg = 1.0 - vbd / model->BSIM3v32PhiB;
if (MJ == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJ * log(arg));
*(ckt->CKTstate0 + here->BSIM3v32qbd) = model->BSIM3v32PhiB * czbd
* (1.0 - arg * sarg) / (1.0 - MJ);
here->BSIM3v32capbd = czbd * sarg;
}
else
{ *(ckt->CKTstate0 + here->BSIM3v32qbd) = 0.0;
here->BSIM3v32capbd = 0.0;
}
if (czbdsw > 0.0)
{ arg = 1.0 - vbd / model->BSIM3v32PhiBSW;
if (MJSW == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSW * log(arg));
*(ckt->CKTstate0 + here->BSIM3v32qbd) += model->BSIM3v32PhiBSW * czbdsw
* (1.0 - arg * sarg) / (1.0 - MJSW);
here->BSIM3v32capbd += czbdsw * sarg;
}
if (czbdswg > 0.0)
{ arg = 1.0 - vbd / model->BSIM3v32PhiBSWG;
if (MJSWG == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSWG * log(arg));
*(ckt->CKTstate0 + here->BSIM3v32qbd) += model->BSIM3v32PhiBSWG * czbdswg
* (1.0 - arg * sarg) / (1.0 - MJSWG);
here->BSIM3v32capbd += czbdswg * sarg;
}
}
else
{ T0 = czbd + czbdsw + czbdswg;
T1 = vbd * (czbd * MJ / model->BSIM3v32PhiB + czbdsw * MJSW
/ model->BSIM3v32PhiBSW + czbdswg * MJSWG / model->BSIM3v32PhiBSWG);
*(ckt->CKTstate0 + here->BSIM3v32qbd) = vbd * (T0 + 0.5 * T1);
here->BSIM3v32capbd = T0 + T1;
}
}
/*
* check convergence
*/
if ((here->BSIM3v32off == 0) || (!(ckt->CKTmode & MODEINITFIX)))
{ if (Check == 1)
{ ckt->CKTnoncon++;
#ifndef NEWCONV
}
else
{ if (here->BSIM3v32mode >= 0)
{ Idtot = here->BSIM3v32cd + here->BSIM3v32csub - here->BSIM3v32cbd;
}
else
{ Idtot = here->BSIM3v32cd - here->BSIM3v32cbd;
}
tol = ckt->CKTreltol * MAX(fabs(cdhat), fabs(Idtot))
+ ckt->CKTabstol;
if (fabs(cdhat - Idtot) >= tol)
{ ckt->CKTnoncon++;
}
else
{ Ibtot = here->BSIM3v32cbs + here->BSIM3v32cbd - here->BSIM3v32csub;
tol = ckt->CKTreltol * MAX(fabs(cbhat), fabs(Ibtot))
+ ckt->CKTabstol;
if (fabs(cbhat - Ibtot) > tol)
{ ckt->CKTnoncon++;
}
}
#endif /* NEWCONV */
}
}
*(ckt->CKTstate0 + here->BSIM3v32vbs) = vbs;
*(ckt->CKTstate0 + here->BSIM3v32vbd) = vbd;
*(ckt->CKTstate0 + here->BSIM3v32vgs) = vgs;
*(ckt->CKTstate0 + here->BSIM3v32vds) = vds;
*(ckt->CKTstate0 + here->BSIM3v32qdef) = qdef;
/* bulk and channel charge plus overlaps */
if (!ChargeComputationNeeded)
goto line850;
#ifndef NOBYPASS
line755:
#endif
/* NQS begins */
if (here->BSIM3v32nqsMod)
{ qcheq = -(qbulk + qgate);
here->BSIM3v32cqgb = -(here->BSIM3v32cggb + here->BSIM3v32cbgb);
here->BSIM3v32cqdb = -(here->BSIM3v32cgdb + here->BSIM3v32cbdb);
here->BSIM3v32cqsb = -(here->BSIM3v32cgsb + here->BSIM3v32cbsb);
here->BSIM3v32cqbb = -(here->BSIM3v32cqgb + here->BSIM3v32cqdb
+ here->BSIM3v32cqsb);
gtau_drift = fabs(here->BSIM3v32tconst * qcheq) * ScalingFactor;
T0 = pParam->BSIM3v32leffCV * pParam->BSIM3v32leffCV;
gtau_diff = 16.0 * here->BSIM3v32u0temp * model->BSIM3v32vtm / T0
* ScalingFactor;
here->BSIM3v32gtau = gtau_drift + gtau_diff;
}
if (model->BSIM3v32capMod == 0)
{
/* code merge -JX */
cgdo = pParam->BSIM3v32cgdo;
qgdo = pParam->BSIM3v32cgdo * vgd;
cgso = pParam->BSIM3v32cgso;
qgso = pParam->BSIM3v32cgso * vgs;
}
else if (model->BSIM3v32capMod == 1)
{ if (vgd < 0.0)
{ T1 = sqrt(1.0 - 4.0 * vgd / pParam->BSIM3v32ckappa);
cgdo = pParam->BSIM3v32cgdo + pParam->BSIM3v32weffCV
* pParam->BSIM3v32cgdl / T1;
qgdo = pParam->BSIM3v32cgdo * vgd - pParam->BSIM3v32weffCV * 0.5
* pParam->BSIM3v32cgdl * pParam->BSIM3v32ckappa * (T1 - 1.0);
}
else
{ cgdo = pParam->BSIM3v32cgdo + pParam->BSIM3v32weffCV
* pParam->BSIM3v32cgdl;
qgdo = (pParam->BSIM3v32weffCV * pParam->BSIM3v32cgdl
+ pParam->BSIM3v32cgdo) * vgd;
}
if (vgs < 0.0)
{ T1 = sqrt(1.0 - 4.0 * vgs / pParam->BSIM3v32ckappa);
cgso = pParam->BSIM3v32cgso + pParam->BSIM3v32weffCV
* pParam->BSIM3v32cgsl / T1;
qgso = pParam->BSIM3v32cgso * vgs - pParam->BSIM3v32weffCV * 0.5
* pParam->BSIM3v32cgsl * pParam->BSIM3v32ckappa * (T1 - 1.0);
}
else
{ cgso = pParam->BSIM3v32cgso + pParam->BSIM3v32weffCV
* pParam->BSIM3v32cgsl;
qgso = (pParam->BSIM3v32weffCV * pParam->BSIM3v32cgsl
+ pParam->BSIM3v32cgso) * vgs;
}
}
else
{ T0 = vgd + DELTA_1;
T1 = sqrt(T0 * T0 + 4.0 * DELTA_1);
T2 = 0.5 * (T0 - T1);
T3 = pParam->BSIM3v32weffCV * pParam->BSIM3v32cgdl;
T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM3v32ckappa);
cgdo = pParam->BSIM3v32cgdo + T3 - T3 * (1.0 - 1.0 / T4)
* (0.5 - 0.5 * T0 / T1);
qgdo = (pParam->BSIM3v32cgdo + T3) * vgd - T3 * (T2
+ 0.5 * pParam->BSIM3v32ckappa * (T4 - 1.0));
T0 = vgs + DELTA_1;
T1 = sqrt(T0 * T0 + 4.0 * DELTA_1);
T2 = 0.5 * (T0 - T1);
T3 = pParam->BSIM3v32weffCV * pParam->BSIM3v32cgsl;
T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM3v32ckappa);
cgso = pParam->BSIM3v32cgso + T3 - T3 * (1.0 - 1.0 / T4)
* (0.5 - 0.5 * T0 / T1);
qgso = (pParam->BSIM3v32cgso + T3) * vgs - T3 * (T2
+ 0.5 * pParam->BSIM3v32ckappa * (T4 - 1.0));
}
here->BSIM3v32cgdo = cgdo;
here->BSIM3v32cgso = cgso;
ag0 = ckt->CKTag[0];
if (here->BSIM3v32mode > 0)
{ if (here->BSIM3v32nqsMod == 0)
{ gcggb = (here->BSIM3v32cggb + cgdo + cgso
+ pParam->BSIM3v32cgbo ) * ag0;
gcgdb = (here->BSIM3v32cgdb - cgdo) * ag0;
gcgsb = (here->BSIM3v32cgsb - cgso) * ag0;
gcdgb = (here->BSIM3v32cdgb - cgdo) * ag0;
gcddb = (here->BSIM3v32cddb + here->BSIM3v32capbd + cgdo) * ag0;
gcdsb = here->BSIM3v32cdsb * ag0;
gcsgb = -(here->BSIM3v32cggb + here->BSIM3v32cbgb
+ here->BSIM3v32cdgb + cgso) * ag0;
gcsdb = -(here->BSIM3v32cgdb + here->BSIM3v32cbdb
+ here->BSIM3v32cddb) * ag0;
gcssb = (here->BSIM3v32capbs + cgso - (here->BSIM3v32cgsb
+ here->BSIM3v32cbsb + here->BSIM3v32cdsb)) * ag0;
gcbgb = (here->BSIM3v32cbgb - pParam->BSIM3v32cgbo) * ag0;
gcbdb = (here->BSIM3v32cbdb - here->BSIM3v32capbd) * ag0;
gcbsb = (here->BSIM3v32cbsb - here->BSIM3v32capbs) * ag0;
qgd = qgdo;
qgs = qgso;
qgb = pParam->BSIM3v32cgbo * vgb;
qgate += qgd + qgs + qgb;
qbulk -= qgb;
qdrn -= qgd;
qsrc = -(qgate + qbulk + qdrn);
ggtg = ggtd = ggtb = ggts = 0.0;
sxpart = 0.6;
dxpart = 0.4;
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0;
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0;
}
else
{ if (qcheq > 0.0)
T0 = here->BSIM3v32tconst * qdef * ScalingFactor;
else
T0 = -here->BSIM3v32tconst * qdef * ScalingFactor;
ggtg = here->BSIM3v32gtg = T0 * here->BSIM3v32cqgb;
ggtd = here->BSIM3v32gtd = T0 * here->BSIM3v32cqdb;
ggts = here->BSIM3v32gts = T0 * here->BSIM3v32cqsb;
ggtb = here->BSIM3v32gtb = T0 * here->BSIM3v32cqbb;
gqdef = ScalingFactor * ag0;
gcqgb = here->BSIM3v32cqgb * ag0;
gcqdb = here->BSIM3v32cqdb * ag0;
gcqsb = here->BSIM3v32cqsb * ag0;
gcqbb = here->BSIM3v32cqbb * ag0;
gcggb = (cgdo + cgso + pParam->BSIM3v32cgbo ) * ag0;
gcgdb = -cgdo * ag0;
gcgsb = -cgso * ag0;
gcdgb = -cgdo * ag0;
gcddb = (here->BSIM3v32capbd + cgdo) * ag0;
gcdsb = 0.0;
gcsgb = -cgso * ag0;
gcsdb = 0.0;
gcssb = (here->BSIM3v32capbs + cgso) * ag0;
gcbgb = -pParam->BSIM3v32cgbo * ag0;
gcbdb = -here->BSIM3v32capbd * ag0;
gcbsb = -here->BSIM3v32capbs * ag0;
CoxWL = model->BSIM3v32cox * pParam->BSIM3v32weffCV
* pParam->BSIM3v32leffCV;
if (fabs(qcheq) <= 1.0e-5 * CoxWL)
{ if (model->BSIM3v32xpart < 0.5)
{ dxpart = 0.4;
}
else if (model->BSIM3v32xpart > 0.5)
{ dxpart = 0.0;
}
else
{ dxpart = 0.5;
}
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb
= ddxpart_dVs = 0.0;
}
else
{ dxpart = qdrn / qcheq;
Cdd = here->BSIM3v32cddb;
Csd = -(here->BSIM3v32cgdb + here->BSIM3v32cddb
+ here->BSIM3v32cbdb);
ddxpart_dVd = (Cdd - dxpart * (Cdd + Csd)) / qcheq;
Cdg = here->BSIM3v32cdgb;
Csg = -(here->BSIM3v32cggb + here->BSIM3v32cdgb
+ here->BSIM3v32cbgb);
ddxpart_dVg = (Cdg - dxpart * (Cdg + Csg)) / qcheq;
Cds = here->BSIM3v32cdsb;
Css = -(here->BSIM3v32cgsb + here->BSIM3v32cdsb
+ here->BSIM3v32cbsb);
ddxpart_dVs = (Cds - dxpart * (Cds + Css)) / qcheq;
ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs);
}
sxpart = 1.0 - dxpart;
dsxpart_dVd = -ddxpart_dVd;
dsxpart_dVg = -ddxpart_dVg;
dsxpart_dVs = -ddxpart_dVs;
dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs);
qgd = qgdo;
qgs = qgso;
qgb = pParam->BSIM3v32cgbo * vgb;
qgate = qgd + qgs + qgb;
qbulk = -qgb;
qdrn = -qgd;
qsrc = -(qgate + qbulk + qdrn);
}
}
else
{ if (here->BSIM3v32nqsMod == 0)
{ gcggb = (here->BSIM3v32cggb + cgdo + cgso
+ pParam->BSIM3v32cgbo ) * ag0;
gcgdb = (here->BSIM3v32cgsb - cgdo) * ag0;
gcgsb = (here->BSIM3v32cgdb - cgso) * ag0;
gcdgb = -(here->BSIM3v32cggb + here->BSIM3v32cbgb
+ here->BSIM3v32cdgb + cgdo) * ag0;
gcddb = (here->BSIM3v32capbd + cgdo - (here->BSIM3v32cgsb
+ here->BSIM3v32cbsb + here->BSIM3v32cdsb)) * ag0;
gcdsb = -(here->BSIM3v32cgdb + here->BSIM3v32cbdb
+ here->BSIM3v32cddb) * ag0;
gcsgb = (here->BSIM3v32cdgb - cgso) * ag0;
gcsdb = here->BSIM3v32cdsb * ag0;
gcssb = (here->BSIM3v32cddb + here->BSIM3v32capbs + cgso) * ag0;
gcbgb = (here->BSIM3v32cbgb - pParam->BSIM3v32cgbo) * ag0;
gcbdb = (here->BSIM3v32cbsb - here->BSIM3v32capbd) * ag0;
gcbsb = (here->BSIM3v32cbdb - here->BSIM3v32capbs) * ag0;
qgd = qgdo;
qgs = qgso;
qgb = pParam->BSIM3v32cgbo * vgb;
qgate += qgd + qgs + qgb;
qbulk -= qgb;
qsrc = qdrn - qgs;
qdrn = -(qgate + qbulk + qsrc);
ggtg = ggtd = ggtb = ggts = 0.0;
sxpart = 0.4;
dxpart = 0.6;
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0;
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0;
}
else
{ if (qcheq > 0.0)
T0 = here->BSIM3v32tconst * qdef * ScalingFactor;
else
T0 = -here->BSIM3v32tconst * qdef * ScalingFactor;
ggtg = here->BSIM3v32gtg = T0 * here->BSIM3v32cqgb;
ggts = here->BSIM3v32gtd = T0 * here->BSIM3v32cqdb;
ggtd = here->BSIM3v32gts = T0 * here->BSIM3v32cqsb;
ggtb = here->BSIM3v32gtb = T0 * here->BSIM3v32cqbb;
gqdef = ScalingFactor * ag0;
gcqgb = here->BSIM3v32cqgb * ag0;
gcqdb = here->BSIM3v32cqsb * ag0;
gcqsb = here->BSIM3v32cqdb * ag0;
gcqbb = here->BSIM3v32cqbb * ag0;
gcggb = (cgdo + cgso + pParam->BSIM3v32cgbo) * ag0;
gcgdb = -cgdo * ag0;
gcgsb = -cgso * ag0;
gcdgb = -cgdo * ag0;
gcddb = (here->BSIM3v32capbd + cgdo) * ag0;
gcdsb = 0.0;
gcsgb = -cgso * ag0;
gcsdb = 0.0;
gcssb = (here->BSIM3v32capbs + cgso) * ag0;
gcbgb = -pParam->BSIM3v32cgbo * ag0;
gcbdb = -here->BSIM3v32capbd * ag0;
gcbsb = -here->BSIM3v32capbs * ag0;
CoxWL = model->BSIM3v32cox * pParam->BSIM3v32weffCV
* pParam->BSIM3v32leffCV;
if (fabs(qcheq) <= 1.0e-5 * CoxWL)
{ if (model->BSIM3v32xpart < 0.5)
{ sxpart = 0.4;
}
else if (model->BSIM3v32xpart > 0.5)
{ sxpart = 0.0;
}
else
{ sxpart = 0.5;
}
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb
= dsxpart_dVs = 0.0;
}
else
{ sxpart = qdrn / qcheq;
Css = here->BSIM3v32cddb;
Cds = -(here->BSIM3v32cgdb + here->BSIM3v32cddb
+ here->BSIM3v32cbdb);
dsxpart_dVs = (Css - sxpart * (Css + Cds)) / qcheq;
Csg = here->BSIM3v32cdgb;
Cdg = -(here->BSIM3v32cggb + here->BSIM3v32cdgb
+ here->BSIM3v32cbgb);
dsxpart_dVg = (Csg - sxpart * (Csg + Cdg)) / qcheq;
Csd = here->BSIM3v32cdsb;
Cdd = -(here->BSIM3v32cgsb + here->BSIM3v32cdsb
+ here->BSIM3v32cbsb);
dsxpart_dVd = (Csd - sxpart * (Csd + Cdd)) / qcheq;
dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs);
}
dxpart = 1.0 - sxpart;
ddxpart_dVd = -dsxpart_dVd;
ddxpart_dVg = -dsxpart_dVg;
ddxpart_dVs = -dsxpart_dVs;
ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs);
qgd = qgdo;
qgs = qgso;
qgb = pParam->BSIM3v32cgbo * vgb;
qgate = qgd + qgs + qgb;
qbulk = -qgb;
qsrc = -qgs;
qdrn = -(qgate + qbulk + qsrc);
}
}
cqdef = cqcheq = 0.0;
if (ByPass) goto line860;
*(ckt->CKTstate0 + here->BSIM3v32qg) = qgate;
*(ckt->CKTstate0 + here->BSIM3v32qd) = qdrn
- *(ckt->CKTstate0 + here->BSIM3v32qbd);
*(ckt->CKTstate0 + here->BSIM3v32qb) = qbulk
+ *(ckt->CKTstate0 + here->BSIM3v32qbd)
+ *(ckt->CKTstate0 + here->BSIM3v32qbs);
if (here->BSIM3v32nqsMod)
{ *(ckt->CKTstate0 + here->BSIM3v32qcdump) = qdef * ScalingFactor;
*(ckt->CKTstate0 + here->BSIM3v32qcheq) = qcheq;
}
/* store small signal parameters */
if (ckt->CKTmode & MODEINITSMSIG)
{ goto line1000;
}
if (!ChargeComputationNeeded)
goto line850;
if (ckt->CKTmode & MODEINITTRAN)
{ *(ckt->CKTstate1 + here->BSIM3v32qb) =
*(ckt->CKTstate0 + here->BSIM3v32qb);
*(ckt->CKTstate1 + here->BSIM3v32qg) =
*(ckt->CKTstate0 + here->BSIM3v32qg);
*(ckt->CKTstate1 + here->BSIM3v32qd) =
*(ckt->CKTstate0 + here->BSIM3v32qd);
if (here->BSIM3v32nqsMod)
{ *(ckt->CKTstate1 + here->BSIM3v32qcheq) =
*(ckt->CKTstate0 + here->BSIM3v32qcheq);
*(ckt->CKTstate1 + here->BSIM3v32qcdump) =
*(ckt->CKTstate0 + here->BSIM3v32qcdump);
}
}
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3v32qb);
if (error)
return(error);
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3v32qg);
if (error)
return(error);
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3v32qd);
if (error)
return(error);
if (here->BSIM3v32nqsMod)
{ error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3v32qcdump);
if (error)
return(error);
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM3v32qcheq);
if (error)
return(error);
}
goto line860;
line850:
/* initialize to zero charge conductance and current */
ceqqg = ceqqb = ceqqd = 0.0;
cqcheq = cqdef = 0.0;
gcdgb = gcddb = gcdsb = 0.0;
gcsgb = gcsdb = gcssb = 0.0;
gcggb = gcgdb = gcgsb = 0.0;
gcbgb = gcbdb = gcbsb = 0.0;
gqdef = gcqgb = gcqdb = gcqsb = gcqbb = 0.0;
ggtg = ggtd = ggtb = ggts = 0.0;
sxpart = (1.0 - (dxpart = (here->BSIM3v32mode > 0) ? 0.4 : 0.6));
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0;
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0;
if (here->BSIM3v32nqsMod)
here->BSIM3v32gtau = 16.0 * here->BSIM3v32u0temp * model->BSIM3v32vtm
/ pParam->BSIM3v32leffCV / pParam->BSIM3v32leffCV
* ScalingFactor;
else
here->BSIM3v32gtau = 0.0;
goto line900;
line860:
/* evaluate equivalent charge current */
cqgate = *(ckt->CKTstate0 + here->BSIM3v32cqg);
cqbulk = *(ckt->CKTstate0 + here->BSIM3v32cqb);
cqdrn = *(ckt->CKTstate0 + here->BSIM3v32cqd);
ceqqg = cqgate - gcggb * vgb + gcgdb * vbd + gcgsb * vbs;
ceqqb = cqbulk - gcbgb * vgb + gcbdb * vbd + gcbsb * vbs;
ceqqd = cqdrn - gcdgb * vgb + gcddb * vbd + gcdsb * vbs;
if (here->BSIM3v32nqsMod)
{ T0 = ggtg * vgb - ggtd * vbd - ggts * vbs;
ceqqg += T0;
T1 = qdef * here->BSIM3v32gtau;
ceqqd -= dxpart * T0 + T1 * (ddxpart_dVg * vgb - ddxpart_dVd
* vbd - ddxpart_dVs * vbs);
cqdef = *(ckt->CKTstate0 + here->BSIM3v32cqcdump) - gqdef * qdef;
cqcheq = *(ckt->CKTstate0 + here->BSIM3v32cqcheq)
- (gcqgb * vgb - gcqdb * vbd - gcqsb * vbs) + T0;
}
if (ckt->CKTmode & MODEINITTRAN)
{ *(ckt->CKTstate1 + here->BSIM3v32cqb) =
*(ckt->CKTstate0 + here->BSIM3v32cqb);
*(ckt->CKTstate1 + here->BSIM3v32cqg) =
*(ckt->CKTstate0 + here->BSIM3v32cqg);
*(ckt->CKTstate1 + here->BSIM3v32cqd) =
*(ckt->CKTstate0 + here->BSIM3v32cqd);
if (here->BSIM3v32nqsMod)
{ *(ckt->CKTstate1 + here->BSIM3v32cqcheq) =
*(ckt->CKTstate0 + here->BSIM3v32cqcheq);
*(ckt->CKTstate1 + here->BSIM3v32cqcdump) =
*(ckt->CKTstate0 + here->BSIM3v32cqcdump);
}
}
/*
* load current vector
*/
line900:
if (here->BSIM3v32mode >= 0)
{ Gm = here->BSIM3v32gm;
Gmbs = here->BSIM3v32gmbs;
FwdSum = Gm + Gmbs;
RevSum = 0.0;
cdreq = model->BSIM3v32type * (cdrain - here->BSIM3v32gds * vds
- Gm * vgs - Gmbs * vbs);
ceqbd = -model->BSIM3v32type * (here->BSIM3v32csub
- here->BSIM3v32gbds * vds - here->BSIM3v32gbgs * vgs
- here->BSIM3v32gbbs * vbs);
ceqbs = 0.0;
gbbdp = -here->BSIM3v32gbds;
gbbsp = (here->BSIM3v32gbds + here->BSIM3v32gbgs + here->BSIM3v32gbbs);
gbdpg = here->BSIM3v32gbgs;
gbdpdp = here->BSIM3v32gbds;
gbdpb = here->BSIM3v32gbbs;
gbdpsp = -(gbdpg + gbdpdp + gbdpb);
gbspg = 0.0;
gbspdp = 0.0;
gbspb = 0.0;
gbspsp = 0.0;
}
else
{ Gm = -here->BSIM3v32gm;
Gmbs = -here->BSIM3v32gmbs;
FwdSum = 0.0;
RevSum = -(Gm + Gmbs);
cdreq = -model->BSIM3v32type * (cdrain + here->BSIM3v32gds * vds
+ Gm * vgd + Gmbs * vbd);
ceqbs = -model->BSIM3v32type * (here->BSIM3v32csub
+ here->BSIM3v32gbds * vds - here->BSIM3v32gbgs * vgd
- here->BSIM3v32gbbs * vbd);
ceqbd = 0.0;
gbbsp = -here->BSIM3v32gbds;
gbbdp = (here->BSIM3v32gbds + here->BSIM3v32gbgs + here->BSIM3v32gbbs);
gbdpg = 0.0;
gbdpsp = 0.0;
gbdpb = 0.0;
gbdpdp = 0.0;
gbspg = here->BSIM3v32gbgs;
gbspsp = here->BSIM3v32gbds;
gbspb = here->BSIM3v32gbbs;
gbspdp = -(gbspg + gbspsp + gbspb);
}
if (model->BSIM3v32type > 0)
{ ceqbs += (here->BSIM3v32cbs - here->BSIM3v32gbs * vbs);
ceqbd += (here->BSIM3v32cbd - here->BSIM3v32gbd * vbd);
/*
ceqqg = ceqqg;
ceqqb = ceqqb;
ceqqd = ceqqd;
cqdef = cqdef;
cqcheq = cqcheq;
*/
}
else
{ ceqbs -= (here->BSIM3v32cbs - here->BSIM3v32gbs * vbs);
ceqbd -= (here->BSIM3v32cbd - here->BSIM3v32gbd * vbd);
ceqqg = -ceqqg;
ceqqb = -ceqqb;
ceqqd = -ceqqd;
cqdef = -cqdef;
cqcheq = -cqcheq;
}
m = here->BSIM3v32m;
#ifdef USE_OMP
here->BSIM3v32rhsG = m * ceqqg;
here->BSIM3v32rhsB = m * (ceqbs + ceqbd + ceqqb);
here->BSIM3v32rhsD = m * (ceqbd - cdreq - ceqqd);
here->BSIM3v32rhsS = m * (cdreq + ceqbs + ceqqg
+ ceqqb + ceqqd);
if (here->BSIM3v32nqsMod)
here->BSIM3v32rhsQ = m * (cqcheq - cqdef);
#else
(*(ckt->CKTrhs + here->BSIM3v32gNode) -= m * ceqqg);
(*(ckt->CKTrhs + here->BSIM3v32bNode) -= m * (ceqbs + ceqbd + ceqqb));
(*(ckt->CKTrhs + here->BSIM3v32dNodePrime) += m * (ceqbd - cdreq - ceqqd));
(*(ckt->CKTrhs + here->BSIM3v32sNodePrime) += m * (cdreq + ceqbs + ceqqg
+ ceqqb + ceqqd));
if (here->BSIM3v32nqsMod)
*(ckt->CKTrhs + here->BSIM3v32qNode) += m * (cqcheq - cqdef);
#endif
/*
* load y matrix
*/
T1 = qdef * here->BSIM3v32gtau;
#ifdef USE_OMP
here->BSIM3v32DdPt = m * here->BSIM3v32drainConductance;
here->BSIM3v32GgPt = m * (gcggb - ggtg);
here->BSIM3v32SsPt = m * here->BSIM3v32sourceConductance;
here->BSIM3v32BbPt = m * (here->BSIM3v32gbd + here->BSIM3v32gbs
- gcbgb - gcbdb - gcbsb - here->BSIM3v32gbbs);
here->BSIM3v32DPdpPt = m * (here->BSIM3v32drainConductance
+ here->BSIM3v32gds + here->BSIM3v32gbd
+ RevSum + gcddb + dxpart * ggtd
+ T1 * ddxpart_dVd + gbdpdp);
here->BSIM3v32SPspPt = m * (here->BSIM3v32sourceConductance
+ here->BSIM3v32gds + here->BSIM3v32gbs
+ FwdSum + gcssb + sxpart * ggts
+ T1 * dsxpart_dVs + gbspsp);
here->BSIM3v32DdpPt = m * here->BSIM3v32drainConductance;
here->BSIM3v32GbPt = m * (gcggb + gcgdb + gcgsb + ggtb);
here->BSIM3v32GdpPt = m * (gcgdb - ggtd);
here->BSIM3v32GspPt = m * (gcgsb - ggts);
here->BSIM3v32SspPt = m * here->BSIM3v32sourceConductance;
here->BSIM3v32BgPt = m * (gcbgb - here->BSIM3v32gbgs);
here->BSIM3v32BdpPt = m * (gcbdb - here->BSIM3v32gbd + gbbdp);
here->BSIM3v32BspPt = m * (gcbsb - here->BSIM3v32gbs + gbbsp);
here->BSIM3v32DPdPt = m * here->BSIM3v32drainConductance;
here->BSIM3v32DPgPt = m * (Gm + gcdgb + dxpart * ggtg
+ T1 * ddxpart_dVg + gbdpg);
here->BSIM3v32DPbPt = m * (here->BSIM3v32gbd - Gmbs + gcdgb + gcddb
+ gcdsb - dxpart * ggtb
- T1 * ddxpart_dVb - gbdpb);
here->BSIM3v32DPspPt = m * (here->BSIM3v32gds + FwdSum - gcdsb
- dxpart * ggts - T1 * ddxpart_dVs - gbdpsp);
here->BSIM3v32SPgPt = m * (gcsgb - Gm + sxpart * ggtg
+ T1 * dsxpart_dVg + gbspg);
here->BSIM3v32SPsPt = m * here->BSIM3v32sourceConductance;
here->BSIM3v32SPbPt = m * (here->BSIM3v32gbs + Gmbs + gcsgb + gcsdb
+ gcssb - sxpart * ggtb
- T1 * dsxpart_dVb - gbspb);
here->BSIM3v32SPdpPt = m * (here->BSIM3v32gds + RevSum - gcsdb
- sxpart * ggtd - T1 * dsxpart_dVd - gbspdp);
if (here->BSIM3v32nqsMod)
{
here->BSIM3v32QqPt = m * (gqdef + here->BSIM3v32gtau);
here->BSIM3v32DPqPt = m * (dxpart * here->BSIM3v32gtau);
here->BSIM3v32SPqPt = m * (sxpart * here->BSIM3v32gtau);
here->BSIM3v32GqPt = m * here->BSIM3v32gtau;
here->BSIM3v32QgPt = m * (ggtg - gcqgb);
here->BSIM3v32QdpPt = m * (ggtd - gcqdb);
here->BSIM3v32QspPt = m * (ggts - gcqsb);
here->BSIM3v32QbPt = m * (ggtb - gcqbb);
}
#else
(*(here->BSIM3v32DdPtr) += m * here->BSIM3v32drainConductance);
(*(here->BSIM3v32GgPtr) += m * (gcggb - ggtg));
(*(here->BSIM3v32SsPtr) += m * here->BSIM3v32sourceConductance);
(*(here->BSIM3v32BbPtr) += m * (here->BSIM3v32gbd + here->BSIM3v32gbs
- gcbgb - gcbdb - gcbsb - here->BSIM3v32gbbs));
(*(here->BSIM3v32DPdpPtr) += m * (here->BSIM3v32drainConductance
+ here->BSIM3v32gds + here->BSIM3v32gbd
+ RevSum + gcddb + dxpart * ggtd
+ T1 * ddxpart_dVd + gbdpdp));
(*(here->BSIM3v32SPspPtr) += m * (here->BSIM3v32sourceConductance
+ here->BSIM3v32gds + here->BSIM3v32gbs
+ FwdSum + gcssb + sxpart * ggts
+ T1 * dsxpart_dVs + gbspsp));
(*(here->BSIM3v32DdpPtr) -= m * here->BSIM3v32drainConductance);
(*(here->BSIM3v32GbPtr) -= m * (gcggb + gcgdb + gcgsb + ggtb));
(*(here->BSIM3v32GdpPtr) += m * (gcgdb - ggtd));
(*(here->BSIM3v32GspPtr) += m * (gcgsb - ggts));
(*(here->BSIM3v32SspPtr) -= m * here->BSIM3v32sourceConductance);
(*(here->BSIM3v32BgPtr) += m * (gcbgb - here->BSIM3v32gbgs));
(*(here->BSIM3v32BdpPtr) += m * (gcbdb - here->BSIM3v32gbd + gbbdp));
(*(here->BSIM3v32BspPtr) += m * (gcbsb - here->BSIM3v32gbs + gbbsp));
(*(here->BSIM3v32DPdPtr) -= m * here->BSIM3v32drainConductance);
(*(here->BSIM3v32DPgPtr) += m * (Gm + gcdgb + dxpart * ggtg
+ T1 * ddxpart_dVg + gbdpg));
(*(here->BSIM3v32DPbPtr) -= m * (here->BSIM3v32gbd - Gmbs + gcdgb + gcddb
+ gcdsb - dxpart * ggtb
- T1 * ddxpart_dVb - gbdpb));
(*(here->BSIM3v32DPspPtr) -= m * (here->BSIM3v32gds + FwdSum - gcdsb
- dxpart * ggts - T1 * ddxpart_dVs - gbdpsp));
(*(here->BSIM3v32SPgPtr) += m * (gcsgb - Gm + sxpart * ggtg
+ T1 * dsxpart_dVg + gbspg));
(*(here->BSIM3v32SPsPtr) -= m * here->BSIM3v32sourceConductance);
(*(here->BSIM3v32SPbPtr) -= m * (here->BSIM3v32gbs + Gmbs + gcsgb + gcsdb
+ gcssb - sxpart * ggtb
- T1 * dsxpart_dVb - gbspb));
(*(here->BSIM3v32SPdpPtr) -= m * (here->BSIM3v32gds + RevSum - gcsdb
- sxpart * ggtd - T1 * dsxpart_dVd - gbspdp));
if (here->BSIM3v32nqsMod)
{
*(here->BSIM3v32QqPtr) += m * (gqdef + here->BSIM3v32gtau);
*(here->BSIM3v32DPqPtr) += m * (dxpart * here->BSIM3v32gtau);
*(here->BSIM3v32SPqPtr) += m * (sxpart * here->BSIM3v32gtau);
*(here->BSIM3v32GqPtr) -= m * here->BSIM3v32gtau;
*(here->BSIM3v32QgPtr) += m * (ggtg - gcqgb);
*(here->BSIM3v32QdpPtr) += m * (ggtd - gcqdb);
*(here->BSIM3v32QspPtr) += m * (ggts - gcqsb);
*(here->BSIM3v32QbPtr) += m * (ggtb - gcqbb);
}
#endif
line1000: ;
#ifndef USE_OMP
} /* End of Mosfet Instance */
} /* End of Model Instance */
#endif
return(OK);
}
#ifdef USE_OMP
/* Scatter phase of the OpenMP-parallel BSIM3v32 load.
 *
 * The per-instance evaluation (run in parallel elsewhere) stashed each
 * device's RHS contributions (rhsG/B/D/S/Q) and matrix stamps (*Pt fields)
 * in the instance struct.  This function runs single-threaded and folds
 * those cached values into the shared CKT right-hand side and sparse
 * matrix, so no locking is needed on CKTrhs or the matrix pointers.
 *
 * Note: the += / -= signs below mirror the serial (non-USE_OMP) load path
 * exactly; changing any sign changes the stamped Jacobian.
 */
void BSIM3v32LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt)
{
int InstCount, idx;
BSIM3v32instance **InstArray;
BSIM3v32instance *here;
BSIM3v32model *model = (BSIM3v32model*)inModel;
InstArray = model->BSIM3v32InstanceArray;
InstCount = model->BSIM3v32InstCount;
for (idx = 0; idx < InstCount; idx++) {
here = InstArray[idx];
/* re-resolve the owning model; instances in the array may belong
 * to different models */
model = BSIM3v32modPtr(here);
/* Update b for Ax = b */
(*(ckt->CKTrhs + here->BSIM3v32gNode) -= here->BSIM3v32rhsG);
(*(ckt->CKTrhs + here->BSIM3v32bNode) -= here->BSIM3v32rhsB);
(*(ckt->CKTrhs + here->BSIM3v32dNodePrime) += here->BSIM3v32rhsD);
(*(ckt->CKTrhs + here->BSIM3v32sNodePrime) += here->BSIM3v32rhsS);
/* charge-node entry only exists when the NQS model is enabled */
if (here->BSIM3v32nqsMod)
(*(ckt->CKTrhs + here->BSIM3v32qNode) += here->BSIM3v32rhsQ);
/* Update A for Ax = b */
(*(here->BSIM3v32DdPtr) += here->BSIM3v32DdPt);
(*(here->BSIM3v32GgPtr) += here->BSIM3v32GgPt);
(*(here->BSIM3v32SsPtr) += here->BSIM3v32SsPt);
(*(here->BSIM3v32BbPtr) += here->BSIM3v32BbPt);
(*(here->BSIM3v32DPdpPtr) += here->BSIM3v32DPdpPt);
(*(here->BSIM3v32SPspPtr) += here->BSIM3v32SPspPt);
(*(here->BSIM3v32DdpPtr) -= here->BSIM3v32DdpPt);
(*(here->BSIM3v32GbPtr) -= here->BSIM3v32GbPt);
(*(here->BSIM3v32GdpPtr) += here->BSIM3v32GdpPt);
(*(here->BSIM3v32GspPtr) += here->BSIM3v32GspPt);
(*(here->BSIM3v32SspPtr) -= here->BSIM3v32SspPt);
(*(here->BSIM3v32BgPtr) += here->BSIM3v32BgPt);
(*(here->BSIM3v32BdpPtr) += here->BSIM3v32BdpPt);
(*(here->BSIM3v32BspPtr) += here->BSIM3v32BspPt);
(*(here->BSIM3v32DPdPtr) -= here->BSIM3v32DPdPt);
(*(here->BSIM3v32DPgPtr) += here->BSIM3v32DPgPt);
(*(here->BSIM3v32DPbPtr) -= here->BSIM3v32DPbPt);
(*(here->BSIM3v32DPspPtr) -= here->BSIM3v32DPspPt);
(*(here->BSIM3v32SPgPtr) += here->BSIM3v32SPgPt);
(*(here->BSIM3v32SPsPtr) -= here->BSIM3v32SPsPt);
(*(here->BSIM3v32SPbPtr) -= here->BSIM3v32SPbPt);
(*(here->BSIM3v32SPdpPtr) -= here->BSIM3v32SPdpPt);
if (here->BSIM3v32nqsMod)
{
*(here->BSIM3v32QqPtr) += here->BSIM3v32QqPt;
*(here->BSIM3v32DPqPtr) += here->BSIM3v32DPqPt;
*(here->BSIM3v32SPqPtr) += here->BSIM3v32SPqPt;
*(here->BSIM3v32GqPtr) -= here->BSIM3v32GqPt;
*(here->BSIM3v32QgPtr) += here->BSIM3v32QgPt;
*(here->BSIM3v32QdpPtr) += here->BSIM3v32QdpPt;
*(here->BSIM3v32QspPtr) += here->BSIM3v32QspPt;
*(here->BSIM3v32QbPtr) += here->BSIM3v32QbPt;
}
}
}
#endif
|
utils.c | /*
A simple 2D hydro code
(C) Romain Teyssier : CEA/IRFU -- original F90 code
(C) Pierre-Francois Lavallee : IDRIS -- original F90 code
(C) Guillaume Colin de Verdiere : CEA/DAM -- for the C version
*/
/*
This software is governed by the CeCILL license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL license and that you accept its terms.
*/
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <time.h>
#include <sys/types.h>
#include <sys/time.h>
#include "utils.h"
// #include "parametres.h"
/* Allocate an nvar-row table; each row spans indices [imin..imax]
 * plus MallocGuard extra guard cells.  Rows come zeroed from DMalloc.
 * Caller owns the table and each row. */
real_t **
allocate(int imin, int imax, int nvar) {
    real_t **table = (real_t **) calloc(nvar, sizeof(real_t *));
    assert(table != NULL);
    size_t span = (size_t) (imax - imin + 1 + MallocGuard);
    int v;
    for (v = 0; v < nvar; v++)
        table[v] = DMalloc(span);
    return table;
}
#ifndef __MIC__
#define NUMA_ALLOC 0
#endif
#ifdef __MIC__
#define MEMSET 1
#else
#define MEMSET 0
#endif
#if NUMA_ALLOC==1
#include <numa.h>
#endif
/* Release one real_t row allocated by DMalloc and null the caller's
 * pointer so a stale handle cannot be reused.  n is only needed by the
 * NUMA build, where numa_free requires the original allocation size. */
void DFree(real_t ** adr, size_t n)
{
    real_t *victim = *adr;
#if NUMA_ALLOC == 1
    numa_free(victim, sizeof(real_t) * (n + MallocGuard));
#else
    free(victim);
#endif
    *adr = NULL;
}
/* Release one int array allocated by IMalloc and null the caller's
 * pointer.  n is only used by the NUMA build (numa_free needs the size). */
void IFree(int ** adr, size_t n)
{
    int *victim = *adr;
#if NUMA_ALLOC == 1
    numa_free(victim, sizeof(int) * (n + MallocGuard));
#else
    free(victim);
#endif
    *adr = NULL;
}
/* Allocate n real_t elements (plus MallocGuard guard cells) and return a
 * zeroed buffer; aborts via assert on allocation failure.
 * Build-time variants:
 *   NUMA_ALLOC==1 : interleave pages across NUMA nodes.
 *   MEMSET==1     : fill with byte 0x01 instead of zeroing (MIC build).
 *   otherwise     : touch/zero every element in an OpenMP loop —
 *                   presumably to get first-touch page placement under
 *                   the caller's thread layout (TODO confirm intent). */
real_t *
DMalloc(size_t n) {
size_t i;
#if NUMA_ALLOC == 1
real_t *r = (real_t *) numa_alloc_interleaved((n + MallocGuard) * sizeof(real_t));
#else
/* calloc already zeroes, so the touch loop below is about page
 * placement rather than initialization in this branch */
real_t *r = (real_t *) calloc((n + MallocGuard), sizeof(real_t));
#endif
assert(r != NULL);
#if MEMSET == 1
memset(r, 1, n * sizeof(real_t));
#else
#ifndef NOTOUCHPAGE
#pragma omp parallel for private(i) shared(r)
for (i = 0; i < n; i++)
r[i] = 0.0L;
#endif
#endif
return r;
}
/* Allocate n ints (plus MallocGuard guard cells), zeroed; aborts via
 * assert on failure.  Same build-time variants as DMalloc: NUMA
 * allocation, MIC memset fill, or an OpenMP touch loop (presumably for
 * first-touch NUMA placement — TODO confirm). */
int *
IMalloc(size_t n) {
size_t i;
#if NUMA_ALLOC == 1
int *r = (int *) numa_alloc((n + MallocGuard) * sizeof(int));
#else
int *r = (int *) calloc((n + MallocGuard), sizeof(int));
#endif
assert(r != NULL);
#if MEMSET == 1
memset(r, 1, n * sizeof(int));
#else
#pragma omp parallel for private(i) shared(r)
for (i = 0; i < n; i++)
r[i] = 0;
#endif
return r;
}
#include "parametres.h"
#define VALPERLINE 16
/* Dump the uold array to fic, one "=uold <var> >" section per conserved
 * variable, one block of rows per j line, at most VALPERLINE-1 values
 * per printed line ("%12.4e " each). */
void
printuoldf(FILE * fic, const hydroparam_t H, hydrovar_t * Hv) {
    int ivar, row, col;
    for (ivar = 0; ivar < H.nvar; ivar++) {
        fprintf(fic, "=uold %d >\n", ivar);
        for (row = 0; row < H.nyt; row++) {
            int on_line = 1;
            for (col = 0; col < H.nxt; col++) {
                fprintf(fic, "%12.4e ", Hv->uold[IHv(col, row, ivar)]);
                if (++on_line == VALPERLINE) {
                    fprintf(fic, "\n");
                    fflush(fic);
                    on_line = 1;
                }
            }
            if (on_line != 1)
                fprintf(fic, "\n");
            fflush(fic);
        }
    }
}
/* Print a as an H.nxystep x n table (row stride H.nxyt), headed by
 * "=<nom> >", wrapping after VALPERLINE-1 values per line. */
void
printarray(FILE * fic, real_t *a, int n, const char *nom, const hydroparam_t H) {
    real_t (*rows)[H.nxyt] = (real_t (*)[H.nxyt]) a;
    long row, col, on_line;
    fprintf(fic, "=%s >\n", nom);
    for (row = 0; row < H.nxystep; row++) {
        on_line = 1;
        for (col = 0; col < n; col++) {
            fprintf(fic, "%12.4e ", rows[row][col]);
            if (++on_line == VALPERLINE) {
                fprintf(fic, "\n");
                on_line = 1;
            }
        }
        if (on_line != 1)
            fprintf(fic, "\n");
    }
    fprintf(fic, "\n");
}
/* Print n ints from a, headed by "=<nom> >", "%4d " each, wrapping
 * after VALPERLINE-1 values per line. */
void
printarrayi(FILE * fic, int *a, int n, const char *nom) {
    int idx, on_line = 1;
    fprintf(fic, "=%s >\n", nom);
    for (idx = 0; idx < n; idx++) {
        fprintf(fic, "%4d ", a[idx]);
        if (++on_line == VALPERLINE) {
            fprintf(fic, "\n");
            on_line = 1;
        }
    }
    if (on_line != 1)
        fprintf(fic, "\n");
}
/* Print a as an H.nvar x n table (row stride H.nxyt), one variable per
 * section separated by "---", wrapping after VALPERLINE-1 values. */
void
printarrayv(FILE * fic, real_t *a, int n, const char *nom, const hydroparam_t H) {
    int col, on_line, var;
    fprintf(fic, "=%s >\n", nom);
    real_t (*rows)[H.nxyt] = (real_t (*)[H.nxyt]) a;
    for (var = 0; var < H.nvar; var++) {
        on_line = 1;
        for (col = 0; col < n; col++) {
            fprintf(fic, "%12.4e ", rows[var][col]);
            if (++on_line == VALPERLINE) {
                fprintf(fic, "\n");
                on_line = 1;
            }
        }
        if (on_line != 1)
            fprintf(fic, "\n");
        fprintf(fic, "---\n");
    }
}
/* Print a as an H.nvar x H.nxystep x n cube (strides H.nxystep, H.nxyt),
 * gnuplot-comment style: every printed line is prefixed with '#',
 * "@" ends a partial row, "-J-" separates j-planes, "---" ends output. */
void
printarrayv2(FILE * fic, real_t *a, int n, const char *nom, const hydroparam_t H) {
    int col, plane, on_line;
    int var;
    fprintf(fic, "=%s >\n#", nom);
    real_t (*cube)[H.nxystep][H.nxyt] = (real_t (*)[H.nxystep][H.nxyt]) a;
    for (var = 0; var < H.nvar; var++) {
        for (plane = 0; plane < H.nxystep; plane++) {
            on_line = 1;
            for (col = 0; col < n; col++) {
                fprintf(fic, "%12.4le ", cube[var][plane][col]);
                if (++on_line == VALPERLINE) {
                    fprintf(fic, "\n#");
                    on_line = 1;
                }
            }
            if (on_line != 1)
                fprintf(fic, "@\n#");
        }
        fprintf(fic, "-J-\n#");
    }
    fprintf(fic, "---\n");
}
/* Format timeInS (seconds) into buf as "HH:MM:SS.mmm".
 * buf must hold at least 13 bytes (more for >99 h).
 *
 * Fix: the old code formatted the fractional part separately with
 * "%.3f"; a fraction like 0.9996 rounded to "1.000", which printed
 * ".000" without carrying into the seconds field (off by ~1 s).
 * Rounding the total to milliseconds first makes the carry propagate
 * through all fields. */
void
timeToString(char *buf, const double timeInS) {
    long total_ms = (long) (timeInS * 1000.0 + 0.5);
    int hour = (int) (total_ms / 3600000L);
    int minute = (int) ((total_ms / 60000L) % 60);
    int second = (int) ((total_ms / 1000L) % 60);
    int milli = (int) (total_ms % 1000L);
    sprintf(buf, "%02d:%02d:%02d.%03d", hour, minute, second, milli);
}
// double
// cclock(void) {
// const double micro = 1.0e-06; /* Conversion constant */
// static long start = 0L, startu;
// struct timeval tp; /* Structure used by gettimeofday */
// double wall_time; /* To hold the result */
// if (gettimeofday(&tp, NULL) == -1)
// wall_time = -1.0e0;
// else if (!start) {
// start = tp.tv_sec;
// startu = tp.tv_usec;
// wall_time = 0.0e0;
// } else
// wall_time = (double) (tp.tv_sec - start) + micro * (tp.tv_usec - startu);
// return wall_time;
// }
//EOF
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * y is used as scratch and is modified.  Returns 1 when the
 * difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y so the usec subtraction cannot underflow. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Normalize in the other direction so tv_usec stays in range. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the usec difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the order-4 (25-point) 3-D wave stencil benchmark.
 * Usage: prog [Nx Ny Nz [Nt]]; interior size + 8 ghost layers per axis.
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing (UB);
 *    they now have defaults that argv overrides.
 *  - A[1] was never initialized but is read on the RHS at t=0
 *    ("- A[(t+1)%2][...]"); it is now zero-filled.
 *  - roc2 was malloc'ed twice, leaking the first allocation.
 *  - initialization loops start at 0 so boundary planes are defined.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* defaults: 64^3 interior + 8 ghost cells per axis, 8 time steps */
  int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 8;
  if (argc > 3) {
    Nx = atoi(argv[1]) + 8;
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* two time planes A[0]/A[1], plus the velocity-model cube roc2 */
  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  double ***roc2 = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    roc2[i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
      roc2[i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 256;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;
  // initialize variables (full grids, including both time planes)
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* order-4 finite-difference coefficients */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz - 4; i++) {
        for (j = 4; j < Ny - 4; j++) {
          for (k = 4; k < Nx - 4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
batchsvd.c | /* Copyright 2015. The Regents of the University of California.
* Copyright 2016. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2015 Frank Ong <frankong@berkeley.edu>
* 2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
*/
#include <math.h>
#include "misc/misc.h"
#include "num/blas.h"
#include "num/lapack.h"
#include "num/linalg.h"
#include "batchsvd.h"
/* Apply singular-value soft-thresholding (SVT) with threshold lambda to
 * each of the num_blocks MxN complex blocks in dst, in place.
 * Each OpenMP thread allocates its own U/VT/S/AA workspaces once and
 * reuses them across the blocks it is assigned. */
void batch_svthresh(long M, long N, long num_blocks, float lambda, complex float dst[num_blocks][N][M])
{
#pragma omp parallel
{
long minMN = MIN(M, N);
/* per-thread scratch: economy-size SVD factors and the Gram matrix */
PTR_ALLOC(complex float[minMN][M], U);
PTR_ALLOC(complex float[N][minMN], VT);
PTR_ALLOC(float[minMN], S);
PTR_ALLOC(complex float[minMN][minMN], AA);
#pragma omp for
for (int b = 0; b < num_blocks; b++) {
// Compute upper bound | A^T A |_inf
// FIXME: this is based on gratuitous guess-work about the obscure
// API of this FORTRAN from ancient times... Is it really worth it?
blas_csyrk('U', (N <= M) ? 'T' : 'N', (N <= M) ? N : M, (N <= M) ? M : N, 1., M, dst[b], 0., minMN, *AA);
// lambda_max( A ) <= max_i sum_j | a_i^T a_j |
/* row-sum bound on the Gram matrix; AA holds only its upper
 * triangle, hence the MAX/MIN indexing below */
float s_upperbound = 0;
for (int i = 0; i < minMN; i++) {
float s = 0;
for (int j = 0; j < minMN; j++)
s += cabsf((*AA)[MAX(i, j)][MIN(i, j)]);
s_upperbound = MAX(s_upperbound, s);
}
/* avoid doing SVD-based thresholding if we know from
 * the upper bound that lambda_max <= lambda and the
 * result must be zero */
/* s_upperbound bounds sigma_max^2, so compare against lambda^2 */
if (s_upperbound < lambda * lambda) {
mat_zero(N, M, dst[b]);
continue;
}
lapack_svd_econ(M, N, *U, *VT, *S, dst[b]);
// soft threshold
/* shrink each singular value by lambda (clamped at zero), folded
 * into VT so the final product U * VT rebuilds the block */
for (int i = 0; i < minMN; i++)
for (int j = 0; j < N; j++)
(*VT)[j][i] *= ((*S)[i] < lambda) ? 0. : ((*S)[i] - lambda);
blas_matrix_multiply(M, N, minMN, dst[b], *U, *VT);
}
PTR_FREE(U);
PTR_FREE(VT);
PTR_FREE(S);
PTR_FREE(AA);
} // #pragma omp parallel
}
|
octree_openmp.c | /********************************************************************/
/* Octree partitioning 3-D points into spatial subvolumes */
/* using OPENMP project 2013 */
/* */
/* Implemented by Nikos Katirtzis (nikos912000) */
/********************************************************************/
/****************** Includes - Defines ******************/
#include "octree_openmp.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
#include <time.h>
/*************** Defines - Initializations **************/
//Total number of points
int N;
//Maximum number of points allowed in a (sub)cube
int S;
// Define a matrix of boxes and leaves
Box *box;
Box *leaf;
double **A, **B;
// Counters
int box_counter = 1;
int leaf_counter = 0;
int num_points = 0;
int running_threads = 0;
// Maximum number of levels - to be printed
int num_levels = 0;
// Number of boxes in each level
int *level_boxes;
/******************** Functions ********************/
void* cubeCheck(void *arg);
void* cubeDivision(void *arg);
void searchNeighbours();
void checkB();
void checkBoundaries();
// Write to files
void AFile();
void BFile();
void neighboursFile();
/******************** Main function ****************/
/* Octree driver: generate N random points on the unit sphere's first
 * octant, build the octree (max S points per leaf), find colleagues
 * (spatial neighbours), and repack leaf points contiguously into B.
 * Usage: prog N S */
int main(int argc, char** argv)
{
if (argc < 3)
{
printf("Error in arguments! Two arguments required: N and S\n");
return 0;
}
int i, j, position, total_time,B_counter = 0;
// get arguments
N = atoi(argv[1]);
S = atoi(argv[2]);
/*****************************************************************/
/* The original N*3 array of random points that belong to the */
/* first octant of the unit sphere (x^2+y^2+z^2=1, 0<=x,y,z<=1) */
/*****************************************************************/
A = (double**)malloc(N * sizeof(double*));
if (A == NULL)
{
exit(1);
}
// number generator seed for different random sequence per run
srand(time(NULL));
// or this command if we want the same sequence for every iteration
//srand(0);
// all values are double because for large N floats aren't large enough
double y_max = 0;
for (i = 0; i < N; i++)
{
A[i] = (double*)malloc(3 * sizeof(double));
// points for which it states x^2+y^2+z^2=1 and 0<=x,y,z<=1
A[i][0] = ((double)rand()/(double)RAND_MAX);
// y_max = sqrt(1 - x^2), so (x, y) stays inside the unit circle
y_max = sqrt(1 - pow(A[i][0], 2));
A[i][1] = ((double)rand()/(double)RAND_MAX)*y_max;
// z = sqrt(1 - x^2 - y^2) puts the point on the sphere surface
A[i][2] = sqrt(1 - pow(A[i][0], 2) - pow(A[i][1], 2));
}
printf("Generation of points completed!\n");
// start timer
gettimeofday(&startwtime, NULL);
// memory allocation for array B (leaf-ordered copy of the points)
B = (double**) malloc(sizeof(double*) * N);
if (B == NULL)
{
exit(1);
}
for (i = 0; i < N; i++)
{
B[i]= (double*)malloc(sizeof(double) * 3);
}
// memory allocation (1 element) for array box; cubeCheck/cubeDivision
// grow it with realloc as the tree is built
box = (Box*)malloc(1 * sizeof(Box));
if (box == NULL)
{
exit(1);
}
// initialize 1st box (unit cube))
box[0].level = 0;
box[0].boxid = 1;
box[0].parent = 0;
box[0].length = 1;
box[0].center[0] = 0.5;
box[0].center[1] = 0.5;
box[0].center[2] = 0.5;
box[0].start = 0;
box[0].n = N;
box[0].points = (int*)malloc(N * sizeof(int));
for(i = 0; i < 26; i++)
{
box[0].colleague[i] = 0;
}
// the root owns every point index
for(i = 0; i < N; i++)
{
box[0].points[i] = i;
}
// begin calculations (recursive subdivision starting at the root)
position = 0;
cubeCheck(&position);
printf("Creation of octree completed!\n");
// find number of cubes in each level
// NOTE(review): level_boxes is allocated but never filled here — verify
// whether the per-level counting was left unimplemented intentionally
level_boxes = (int*)malloc((num_levels + 1) * sizeof(int));
if (level_boxes == NULL){
exit(1);
}
printf("Maximum number of levels = %d\n", num_levels);
printf("Total number of cubes = %d\n", box_counter);
searchNeighbours();
printf("All colleagues have been found!\n");
//Copy all points from leafs to array B
for (i = 0; i < leaf_counter; i++)
{
leaf[i].start = B_counter;
for (j = 0; j < leaf[i].n; j++)
{
B[B_counter][0] = A[leaf[i].points[j]][0];
B[B_counter][1] = A[leaf[i].points[j]][1];
B[B_counter][2] = A[leaf[i].points[j]][2];
B_counter++;
}
}
printf("Array B updated!\n");
//checkB();
//checkBoundaries();
// file insertion
/*AFile();
BFile();
neighboursFile();
printf("File insertion completed!\n");*/
// stop timer
gettimeofday(&endwtime, NULL);
total_time = ((endwtime.tv_sec * 1000000 + endwtime.tv_usec) -(startwtime.tv_sec * 1000000 + startwtime.tv_usec));
printf("Total calculation time is: %d us\n", total_time);
printf("\nTask completed!\n");
return (EXIT_SUCCESS);
}
/* Classify the box at index *arg: gather the parent's points that fall
 * inside it, then mark it empty, record it as a leaf (n <= S), or split
 * it into 8 subcubes via cubeDivision.  Shared arrays box/leaf are
 * accessed inside named OpenMP critical sections.
 * NOTE(review): the box snapshot is taken under boxMutex but the point
 * scan runs outside it — presumably safe because siblings touch
 * disjoint entries; confirm against the parallel call sites. */
void* cubeCheck(void *arg)
{
int i;
int boxIndex = *(int *)arg;
Box temp_box, temp_parent;
#pragma omp critical(boxMutex)
{
temp_box = box[boxIndex];
temp_parent = box[temp_box.parent - 1];
}
/*Array with points indexes that belong to the cube*/
if (temp_box.boxid != 1)
{
/* worst case: the child inherits every parent point */
temp_box.points = (int*)malloc(temp_parent.n * sizeof(int));
/*Checking how many points are included in the cube*/
for (i = 0; i < temp_parent.n; i++)
{
/* a point belongs to the cube when each coordinate is within
 * half the edge length of the cube's center */
if (fabs(temp_box.center[0] - A[temp_parent.points[i]][0]) < temp_box.length / 2)
{
if (fabs(temp_box.center[1] - A[temp_parent.points[i]][1]) < temp_box.length / 2)
{
if (fabs(temp_box.center[2] - A[temp_parent.points[i]][2]) < temp_box.length / 2)
{
temp_box.n++;
temp_box.points[temp_box.n - 1] = temp_parent.points[i];
}
}
}
}
}
if (temp_box.n == 0)
{
// cube has no points (empty)...set boxid = 0 and this child of parent = 0
#pragma omp critical(boxMutex)
{
temp_box.boxid = 0;
box[boxIndex] = temp_box;
temp_parent.child[temp_box.child_index] = 0;
box[temp_parent.boxid - 1] = temp_parent;
}
}
else if (temp_box.n <= S)
{
// cube is a leaf
#pragma omp critical(leafMutex)
{
leaf_counter++;
leaf = (Box*)realloc(leaf, leaf_counter * sizeof(Box));
leaf[leaf_counter - 1] = temp_box;
// update total number of points
num_points += temp_box.n;
}
#pragma omp critical(boxMutex)
{
box[boxIndex] = temp_box;
}
}
else
{
#pragma omp critical(boxMutex)
{
box[boxIndex] = temp_box;
}
// create 8 subcubes
cubeDivision(&temp_box);
}
return NULL;
}
/* Split the given cube into 8 children: grow the global box array,
 * initialize the children (half edge length, centers offset by a
 * quarter edge in each octant), link them into the parent, then
 * recurse into each child via cubeCheck in a parallel loop.
 * All mutation of the shared box array happens inside boxMutex. */
void* cubeDivision(void *arg)
{
Box cube = *(Box *)arg;
int i, j, pos[8];
#pragma omp critical(boxMutex)
{
// allocate memory for 8 more (sub)cubes
box = (Box*)realloc(box, (8 + box_counter) * sizeof(Box));
// initialize subcubes (children)
for (i = 0; i < 8; i++)
{
box_counter++;
box[box_counter - 1].level = cube.level + 1;
box[box_counter - 1].boxid = box_counter;
box[box_counter - 1].parent = cube.boxid;
box[box_counter - 1].length = cube.length / 2;
box[box_counter - 1].n = 0;
box[box_counter - 1].child_index = i;
// update parent with his child
box[cube.boxid - 1].child[i] = box_counter;
// initialize colleagues
box[box_counter - 1].colleague_counter = 0;
for(j = 0; j < 26; j++)
{
box[box_counter - 1].colleague[j] = 0;
}
}
/* remember where this batch of 8 children ends so the indices can
 * be derived outside the critical section */
cube.temp_counter = box_counter;
/* Set subcubes centers*/
// Left - Front - Down
box[box_counter - 8].center[0] = cube.center[0] - cube.length / 4;
box[box_counter - 8].center[1] = cube.center[1] - cube.length / 4;
box[box_counter - 8].center[2] = cube.center[2] - cube.length / 4;
// Left - Front - Up
box[box_counter - 7].center[0] = cube.center[0] - cube.length / 4;
box[box_counter - 7].center[1] = cube.center[1] - cube.length / 4;
box[box_counter - 7].center[2] = cube.center[2] + cube.length / 4;
// Left - Back - Down
box[box_counter - 6].center[0] = cube.center[0] - cube.length / 4;
box[box_counter - 6].center[1] = cube.center[1] + cube.length / 4;
box[box_counter - 6].center[2] = cube.center[2] - cube.length / 4;
// Left - Back - Up
box[box_counter - 5].center[0] = cube.center[0] - cube.length / 4;
box[box_counter - 5].center[1] = cube.center[1] + cube.length / 4;
box[box_counter - 5].center[2] = cube.center[2] + cube.length / 4;
// Right - Front - Down
box[box_counter - 4].center[0] = cube.center[0] + cube.length / 4;
box[box_counter - 4].center[1] = cube.center[1] - cube.length / 4;
box[box_counter - 4].center[2] = cube.center[2] - cube.length / 4;
// Right - Front - Up
box[box_counter - 3].center[0] = cube.center[0] + cube.length / 4;
box[box_counter - 3].center[1] = cube.center[1] - cube.length / 4;
box[box_counter - 3].center[2] = cube.center[2] + cube.length / 4;
// Right - Back - Down
box[box_counter - 2].center[0] = cube.center[0] + cube.length / 4;
box[box_counter - 2].center[1] = cube.center[1] + cube.length / 4;
box[box_counter - 2].center[2] = cube.center[2] - cube.length / 4;
// Right - Back - Up
box[box_counter - 1].center[0] = cube.center[0] + cube.length / 4;
box[box_counter - 1].center[1] = cube.center[1] + cube.length / 4;
box[box_counter - 1].center[2] = cube.center[2] + cube.length / 4;
// check if we have new max level
if (cube.level + 1 > num_levels)
{
num_levels = cube.level + 1;
}
}
/* compute the array indices of the 8 children from the snapshot taken
 * inside the critical section (box_counter may move on concurrently) */
for (i = 0; i < 8; i++)
{
pos[i] = cube.temp_counter - i - 1;
}
#pragma omp parallel shared(pos) private(i)
{
#pragma omp for schedule(dynamic,1) nowait
for (i = 7; i >= 0; i--)
{
cubeCheck(&pos[i]);
}
}
return NULL;
}
/* Populate box[i].colleague[] for every box, level by level:
 * a box's colleagues are (a) its non-empty siblings and (b) children of
 * the parent's colleagues whose center lies within sqrt(3)*length —
 * i.e. cubes that share at least a corner.  The root (index 0) is
 * skipped; it has no parent and therefore no colleagues. */
void searchNeighbours()
{
int level, i, j, m, parent_id, child_id, colleague_id, colleague_index;
double dist0, dist1, dist2;
/* find colleagues searching level by level */
for (level = 0; level < num_levels + 1; level++)
{
// search in all boxes
for (i = 1; i < box_counter; i++)
{
if (box[i].level == level)
{
parent_id = box[i].parent;
/* parent_id == 0 marks the root; empty boxes also stay linked
 * via child[] entries set to 0 and are skipped below */
if (parent_id != 0)
{
for (j = 0; j < 8; j++)
{
child_id = box[parent_id - 1].child[j];
if (child_id != 0)
{
if (box[i].boxid != box[child_id - 1].boxid)
{
// all "brothers" are colleagues (we can ignore the distance)
// we found a colleague!
box[i].colleague[box[i].colleague_counter++] = box[child_id - 1].boxid;
}
}
}
for (j = 0; j < 26; j++)
{
colleague_id = box[parent_id - 1].colleague[j];
// check if parent's colleague has children (if it's not empty or a leaf one)
if (colleague_id != 0)
{
/* n > S means that box was subdivided, so it has children */
if (box[colleague_id - 1].n > S)
{
for (m = 0; m < 8; m++)
{
child_id = box[colleague_id - 1].child[m];
if (child_id != 0)
{
if (box[i].boxid != box[child_id - 1].boxid)
{
// calculate distances
dist0 = box[child_id - 1].center[0] - box[i].center[0];
dist1 = box[child_id - 1].center[1] - box[i].center[1];
dist2 = box[child_id - 1].center[2] - box[i].center[2];
//check if distance is <=root(3)*length (= for the case when we have one common point only)
if (sqrt(dist0 * dist0 + dist1 * dist1 + dist2 * dist2) <= sqrt(3) * box[i].length)
{
colleague_index = box[i].colleague_counter;
box[i].colleague[colleague_index] = box[child_id - 1].boxid;
box[i].colleague_counter++;
}
}
}
}
}
}
}
}
}
}
}
/* Sanity check: every row of B must also appear in A (exact double
 * comparison is intended here — B holds copies, not recomputed values). */
void checkB()
{
    int bi, ai, matches = 0;
    for (bi = 0; bi < N; bi++)
    {
        for (ai = 0; ai < N; ai++)
        {
            int equal = (B[bi][0] == A[ai][0])
                     && (B[bi][1] == A[ai][1])
                     && (B[bi][2] == A[ai][2]);
            if (equal)
                matches++;
        }
    }
    if (matches == N)
    {
        printf("All points of B are also points of A\n");
    }
    else
    {
        printf("Error with points of B\n");
    }
}
/* Sanity check: every point stored in a leaf must lie strictly inside
 * that leaf's cube (each coordinate within half the edge length). */
void checkBoundaries()
{
    int li, pi, inside = 0;
    double dx, dy, dz;
    for (li = 0; li < leaf_counter; li++)
    {
        double half = leaf[li].length / 2;
        for (pi = 0; pi < leaf[li].n; pi++)
        {
            dx = fabs(leaf[li].center[0] - A[leaf[li].points[pi]][0]);
            dy = fabs(leaf[li].center[1] - A[leaf[li].points[pi]][1]);
            dz = fabs(leaf[li].center[2] - A[leaf[li].points[pi]][2]);
            if (dx < half && dy < half && dz < half)
                inside++;
        }
    }
    if (inside == N)
    {
        printf("All points of leafs meet boundaries of subcubes\n");
    }
    else
    {
        printf("Error with points of leafs\n");
    }
}
/* Write the N points of array A to "A.txt", one "x,y,z" line per point.
 * Fix: the old code removed "alpha.txt" (a stale filename) while writing
 * "A.txt"; it also never checked fopen.  The per-line fflush was dropped
 * because fclose flushes the stream. */
void AFile()
{
    FILE *A_file;
    int i;
    remove("A.txt");
    A_file = fopen("A.txt", "wt");
    if (A_file == NULL)
    {
        printf("Error: could not open A.txt for writing\n");
        return;
    }
    for (i = 0; i < N; i++)
    {
        fprintf(A_file,"%f,%f,%f\n",A[i][0],A[i][1],A[i][2]);
    }
    fclose(A_file);
}
/* Write the N points of array B to "B.txt", one "x,y,z" line per point.
 * Fix: check the fopen result before writing (the old code would crash
 * on a NULL stream).  Per-line fflush dropped; fclose flushes. */
void BFile()
{
    FILE *B_file;
    int i;
    remove("B.txt");
    B_file = fopen("B.txt", "wt");
    if (B_file == NULL)
    {
        printf("Error: could not open B.txt for writing\n");
        return;
    }
    for (i = 0; i < N; i++)
    {
        fprintf(B_file,"%f,%f,%f\n",B[i][0],B[i][1],B[i][2]);
    }
    fclose(B_file);
}
/* Write each non-empty box's id and its colleague ids to
 * "neighbours.txt".  Fix: the old code removed "colleagues.txt" (a stale
 * filename) while actually writing "neighbours.txt"; it also never
 * checked fopen.  Per-line fflush dropped; fclose flushes. */
void neighboursFile()
{
    FILE *neighbours_file;
    int i, j;
    remove("neighbours.txt");
    neighbours_file = fopen("neighbours.txt", "wt");
    if (neighbours_file == NULL)
    {
        printf("Error: could not open neighbours.txt for writing\n");
        return;
    }
    for( i = 0; i < box_counter; i++)
    {
        if (box[i].boxid != 0)
        {
            fprintf(neighbours_file,"id: %8d neighbours:",box[i].boxid);
            for(j = 0; j < 26; j++)
            {
                if(box[i].colleague[j] != 0)
                {
                    fprintf(neighbours_file,"%8d",box[i].colleague[j]);
                }
            }
            fprintf(neighbours_file,"\n");
        }
    }
    fclose(neighbours_file);
}
ConstraintsContainerDms-impl.h | /**********************************************************************************************************************
This file is part of the Control Toolbox (https://github.com/ethz-adrl/control-toolbox), copyright by ETH Zurich.
Licensed under the BSD-2 license (see LICENSE file in main directory)
**********************************************************************************************************************/
// Build the full DMS constraint set: one initial-state constraint anchoring
// the first shot at x0, one continuity (defect) constraint per shot coupling
// it to its successor, and — if supplied — the discretized path constraints.
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::ConstraintsContainerDms(
    std::shared_ptr<OptVectorDms<STATE_DIM, CONTROL_DIM, SCALAR>> w,
    std::shared_ptr<tpl::TimeGrid<SCALAR>> timeGrid,
    std::vector<std::shared_ptr<ShotContainer<STATE_DIM, CONTROL_DIM, SCALAR>>> shotContainers,
    std::shared_ptr<ConstraintDiscretizer<STATE_DIM, CONTROL_DIM, SCALAR>> discretizedConstraints,
    const state_vector_t& x0,
    const DmsSettings settings)
    : settings_(settings), shotContainers_(shotContainers)
{
    // Pin the first shot to the prescribed initial state; kept in c_init_ so
    // changeInitialConstraint() can update it later.
    c_init_.reset(new InitStateConstraint<STATE_DIM, CONTROL_DIM, SCALAR>(x0, w));
    this->constraints_.push_back(c_init_);

    // One continuity constraint per shot enforces defect-free transitions.
    for (size_t shotNr = 0; shotNr < settings_.N_; shotNr++)
    {
        this->constraints_.push_back(
            std::shared_ptr<ContinuityConstraint<STATE_DIM, CONTROL_DIM, SCALAR>>(
                new ContinuityConstraint<STATE_DIM, CONTROL_DIM, SCALAR>(
                    shotContainers[shotNr], w, shotNr, settings)));
    }

    // Optional state/input constraints evaluated on the discretization grid.
    if (discretizedConstraints)
    {
        std::cout << "Adding discretized constraints" << std::endl;
        this->constraints_.push_back(discretizedConstraints);
    }
}
// Integrate every shot's dynamics so the continuity constraints can be
// evaluated.  Each ShotContainer is independent, so the loop is spread over
// the configured number of OpenMP threads.
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
void ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::prepareEvaluation()
{
    const size_t numShots = shotContainers_.size();
#pragma omp parallel for num_threads(settings_.nThreads_)
    for (size_t idx = 0; idx < numShots; ++idx)
    {
        shotContainers_[idx]->integrateShot();
    }
}
// Integrate every shot's sensitivities so the constraint Jacobian can be
// evaluated.  Shots are independent, hence the OpenMP parallel loop.
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
void ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::prepareJacobianEvaluation()
{
    const size_t numShots = shotContainers_.size();
#pragma omp parallel for num_threads(settings_.nThreads_)
    for (size_t idx = 0; idx < numShots; ++idx)
    {
        shotContainers_[idx]->integrateSensitivities();
    }
}
// Re-anchor the initial-state constraint to a new starting state x0 (e.g.
// when re-solving the OCP in an MPC loop); all other constraints are left
// untouched.
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR>
void ConstraintsContainerDms<STATE_DIM, CONTROL_DIM, SCALAR>::changeInitialConstraint(const state_vector_t& x0)
{
    c_init_->updateConstraint(x0);
}
|
GB_AxB_flopcount.c | //------------------------------------------------------------------------------
// GB_AxB_flopcount: compute flops for C<M>=A*B or C=A*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// On input, A and B are two matrices for C<M>=A*B or C=A*B. The flop count
// for each B(:,j) is computed, and returned as a cumulative sum. This
// function is CSR/CSC agnostic, but for simplicity of this description, assume
// A and B are both CSC matrices, so that ncols(A) == nrows(B). For both CSR
// and CSC, A->vdim == B->vlen holds. A and/or B may be hypersparse, in any
// combination.
// The complemented mask is not handled, so the flops for C<!M>=A*B is not
// computed.
// If present, Bflops has size (B->nvec)+1, for both standard and hypersparse
// B. Let n = B->vdim be the column dimension of B (that is, B is m-by-n).
// If B is a standard CSC matrix then Bflops has size n+1 == B->nvec+1, and on
// output, Bflops [j] is the # of flops required to compute C (:, 0:j-1). B->h
// is NULL, and is implicitly the vector 0:(n-1).
// If B is hypersparse, then let Bh = B->h. Its size is B->nvec, and j = Bh
// [kk] is the (kk)th column in the data structure for B. C will also be
// hypersparse, and only C(:,Bh) will be computed (C may have fewer non-empty
// columns than B). On output, Bflops [kk] is the number of needed flops to
// compute C (:, Bh [0:kk-1]).
// In both cases, Bflops [0] = 0, and Bflops [B->nvec] = total number of flops.
// The size of Bflops is B->nvec+1 so that it has the same size as B->p. The
// first entry of B->p and Bflops are both zero. This allows B to be sliced
// either by # of entries in B (by slicing B->p) or by the flop count required
// (by slicing Bflops).
// This algorithm does not look at the values of M, A, or B, just their
// patterns. If the mask is present, it is assumed to not be complemented.
// The flop count of C=A*B or C<M>=A*B is computed for a saxpy-based method;
// the work for A'*B for the dot product method is not computed.
// The algorithm scans all nonzeros in B. It only scans at most the min and
// max (first and last) row indices in A and M (if M is present). If A and M
// are not hypersparse, the time taken is O(nnz(B)+n). If all matrices are
// hypersparse, the time is O(nnz(B)*log(h)) where h = max # of vectors present
// in A and M. In pseudo-MATLAB, and assuming B is in standard (not
// hypersparse) form:
/*
[m n] = size (B) ;
Bflops = zeros (1,n+1) ; % (set to zero in the caller)
for each column j in B:
if (B (:,j) is empty) continue ;
if (M is present and M (:,j) is empty) continue ;
im_first = min row index of nonzeros in M(:,j)
im_last = max row index of nonzeros in M(:,j)
for each k where B (k,j) is nonzero:
aknz = nnz (A (:,k))
if (aknz == 0) continue ;
alo = min row index of nonzeros in A(:,k)
ahi = max row index of nonzeros in A(:,k)
if (M is present)
if (intersection (alo:ahi, im_first:im_last) empty) continue
end
% numerical phase will compute: C(:,j)<M(:,j)> += A(:,k)*B(k,j),
% which takes aknz flops, so:
Bflops (j) += aknz
Bflops_per_entry (k,j) = aknz
end
end
*/
// If Bflops and Bflops_per_entry are both NULL, then only the true/false
// result of the test (total_flops <= floplimit) is returned. This allows the
// function to return early, once the total_flops exceeds the threshold.
#include "GB_mxm.h"
#include "GB_ek_slice.h"
#include "GB_bracket.h"
#define GB_FREE_WORK \
{ \
GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ; \
GB_FREE_MEMORY (Wfirst, ntasks, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Wlast, ntasks, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Flops, ntasks+1, sizeof (int64_t)) ; \
}
// GB_AxB_flopcount: count the flops required for C<M>=A*B or C=A*B with a
// saxpy-based method.  Depending on which outputs are non-NULL it fills
// Bflops (per-vector cumulative counts) and/or Bflops_per_entry (per-entry
// cumulative counts); when both are NULL it only sets (*result) to
// (total_flops <= floplimit), allowing tasks to exit early once the limit
// is exceeded.
GrB_Info GB_AxB_flopcount
(
    bool *result,               // result of test (total_flops <= floplimit)
    int64_t *Bflops,            // size B->nvec+1 and all zero, if present
    int64_t *Bflops_per_entry,  // size nnz(B)+1 and all zero, if present
    const GrB_Matrix M,         // optional mask matrix (assumed not complemented)
    const GrB_Matrix A,
    const GrB_Matrix B,
    int64_t floplimit,          // maximum flops to compute if Bflops NULL
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_OK_OR_NULL (GB_check (M, "M for flop count A*B", GB0)) ;
    ASSERT_OK (GB_check (A, "A for flop count A*B", GB0)) ;
    ASSERT_OK (GB_check (B, "B for flop count A*B", GB0)) ;
    // no pending tuples or zombies are tolerated in any input matrix
    ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT (A->vdim == B->vlen) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t bnz = GB_NNZ (B) ;
    int64_t bnvec = B->nvec ;

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (bnz + bnvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // determine the kind of result to return
    //--------------------------------------------------------------------------

    // if no numeric output is requested, only the limit test is computed,
    // which permits an early exit across all tasks
    bool check_quick_return = (Bflops == NULL) && (Bflops_per_entry == NULL) ;

    #ifdef GB_DEBUG
    if (Bflops != NULL)
    {
        // Bflops is set to zero in the caller
        for (int64_t kk = 0 ; kk <= bnvec ; kk++)
        {
            ASSERT (Bflops [kk] == 0) ;
        }
    }
    if (Bflops_per_entry != NULL)
    {
        // Bflops_per_entry is set to zero in the caller
        for (int64_t pB = 0 ; pB <= bnz ; pB++)
        {
            ASSERT (Bflops_per_entry [pB] == 0) ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // get the mask, if present
    //--------------------------------------------------------------------------

    const int64_t *restrict Mh = NULL ;
    const int64_t *restrict Mp = NULL ;
    const int64_t *restrict Mi = NULL ;
    int64_t mnvec = 0 ;
    bool M_is_hyper = GB_IS_HYPER (M) ;
    if (M != NULL)
    {
        Mh = M->h ;
        Mp = M->p ;
        Mi = M->i ;
        mnvec = M->nvec ;
    }

    //--------------------------------------------------------------------------
    // get A and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ai = A->i ;
    int64_t anvec = A->nvec ;
    bool A_is_hyper = GB_IS_HYPER (A) ;

    const int64_t *restrict Bh = B->h ;
    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bi = B->i ;
    bool B_is_hyper = GB_IS_HYPER (B) ;

    //--------------------------------------------------------------------------
    // construct the parallel tasks
    //--------------------------------------------------------------------------

    // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1
    // and vectors kfirst_slice [tid] to klast_slice [tid].  The first and
    // last vectors may be shared with prior slices and subsequent slices.

    int64_t *restrict Wfirst = NULL ;    // size ntasks
    int64_t *restrict Wlast = NULL ;     // size ntasks
    int64_t *restrict Flops = NULL ;     // size ntasks+1

    // oversplit into 64 tasks per thread for better dynamic load balance
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
    ntasks = GB_IMIN (ntasks, bnz) ;
    ntasks = GB_IMAX (ntasks, 1) ;
    int64_t *pstart_slice, *kfirst_slice, *klast_slice ;
    if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, B, ntasks))
    {
        // out of memory
        GB_FREE_WORK ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (Wfirst, ntasks, sizeof (int64_t)) ;
    GB_MALLOC_MEMORY (Wlast, ntasks, sizeof (int64_t)) ;
    GB_MALLOC_MEMORY (Flops, ntasks+1, sizeof (int64_t)) ;
    if (Wfirst == NULL || Wlast == NULL || Flops == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compute flop counts for C<M> = A*B
    //--------------------------------------------------------------------------

    int64_t total_flops = 0 ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // skip this task if limit already reached
        //----------------------------------------------------------------------

        bool quick_return = false ;
        int64_t flops_so_far = 0 ;
        if (check_quick_return)
        {
            {
                // atomically snapshot the global running total
                #pragma omp atomic read
                flops_so_far = total_flops ;
            }
            if (flops_so_far > floplimit) continue ;
        }

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast = klast_slice [tid] ;
        int64_t task_flops = 0 ;
        // Wfirst/Wlast hold this task's partial counts for its (possibly
        // shared) first and last vectors; they are reduced after the loop
        Wfirst [tid] = 0 ;
        Wlast [tid] = 0 ;
        int64_t mpleft = 0 ;     // for GB_lookup of the mask M

        //----------------------------------------------------------------------
        // count flops for vectors kfirst to klast of B
        //----------------------------------------------------------------------

        for (int64_t kk = kfirst ; !quick_return && (kk <= klast) ; kk++)
        {

            //------------------------------------------------------------------
            // find the part of B(:,j) to be computed by this task
            //------------------------------------------------------------------

            int64_t pB, pB_end ;
            GB_get_pA_and_pC (&pB, &pB_end, NULL,
                tid, kk, kfirst, klast, pstart_slice, NULL, NULL, Bp) ;
            int64_t j = (B_is_hyper) ? Bh [kk] : kk ;

            // C(:,j) is empty if B(:,j) is empty
            int64_t bjnz = pB_end - pB ;
            if (bjnz == 0) continue ;

            //------------------------------------------------------------------
            // see if M(:,j) is present and non-empty
            //------------------------------------------------------------------

            int64_t im_first = -1, im_last = -1 ;
            if (M != NULL)
            {
                int64_t mpright = mnvec - 1 ;
                int64_t pM, pM_end ;
                GB_lookup (M_is_hyper, Mh, Mp, &mpleft, mpright, j,
                    &pM, &pM_end) ;
                int64_t mjnz = pM_end - pM ;
                // C(:,j) is empty if M(:,j) is empty
                if (mjnz == 0) continue ;
                // M(:,j) has at least 1 entry; get 1st and last index in M(:,j)
                im_first = Mi [pM] ;
                im_last = Mi [pM_end-1] ;
            }

            //------------------------------------------------------------------
            // trim Ah on right
            //------------------------------------------------------------------

            // Ah [0..A->nvec-1] holds the set of non-empty vectors of A, but
            // only vectors k corresponding to nonzero entries B(k,j) are
            // accessed for this vector B(:,j).  If nnz (B(:,j)) > 2, prune the
            // search space on the right, so the remaining calls to GB_lookup
            // will only need to search Ah [pleft...pright-1].  pright does not
            // change.  pleft is advanced as B(:,j) is traversed, since the
            // indices in B(:,j) are sorted in ascending order.

            int64_t pleft = 0 ;
            int64_t pright = anvec-1 ;
            if (A_is_hyper && bjnz > 2)
            {
                // trim Ah [0..pright] to remove any entries past last B(:,j)
                GB_bracket_right (Bi [pB_end-1], Ah, 0, &pright) ;
            }

            //------------------------------------------------------------------
            // count the flops to compute C(:,j)<M(:,j)> = A*B(:,j)
            //------------------------------------------------------------------

            int64_t bjflops = 0 ;

            for ( ; pB < pB_end ; pB++)
            {
                // B(k,j) is nonzero
                int64_t k = Bi [pB] ;

                // find A(:,k), reusing pleft since Bi [...] is sorted
                int64_t pA, pA_end ;
                GB_lookup (A_is_hyper, Ah, Ap, &pleft, pright, k, &pA, &pA_end);

                // skip if A(:,k) empty
                int64_t aknz = pA_end - pA ;
                if (aknz == 0) continue ;

                // skip if intersection of A(:,k) and M(:,j) is empty
                if (M != NULL)
                {
                    // A(:,k) is non-empty; get first and last index of A(:,k)
                    int64_t alo = Ai [pA] ;
                    int64_t ahi = Ai [pA_end-1] ;
                    if (ahi < im_first || alo > im_last) continue ;
                }

                // increment by flops for the single entry B(k,j)
                // C(:,j)<M(:,j)> += A(:,k)*B(k,j).
                bjflops += aknz ;

                if (Bflops_per_entry != NULL)
                {
                    // flops for the single entry, B(k,j)
                    Bflops_per_entry [pB] = aknz ;
                }

                // check for a quick return
                if (check_quick_return)
                {
                    flops_so_far += aknz ;
                    if (flops_so_far > floplimit)
                    {
                        // flop limit has been reached; terminate this and all
                        // other tasks
                        quick_return = true ;
                        break ;
                    }
                }
            }

            //------------------------------------------------------------------
            // sum up the flops for this task
            //------------------------------------------------------------------

            task_flops += bjflops ;

            //------------------------------------------------------------------
            // log the flops for B(:,j)
            //------------------------------------------------------------------

            if (Bflops != NULL)
            {
                // the first/last vectors may be shared with other tasks, so
                // their partial counts go to Wfirst/Wlast for later reduction
                if (kk == kfirst)
                {
                    Wfirst [tid] = bjflops ;
                }
                else if (kk == klast)
                {
                    Wlast [tid] = bjflops ;
                }
                else
                {
                    Bflops [kk] = bjflops ;
                }
            }
        }

        //----------------------------------------------------------------------
        // log the flops for this task
        //----------------------------------------------------------------------

        Flops [tid] = task_flops ;
        if (check_quick_return)
        {
            // fold this task's count into the global total for early exit
            #pragma omp atomic update
            total_flops += task_flops ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the results
    //--------------------------------------------------------------------------

    if (check_quick_return)
    {
        // The only output of this function is the result of this test:
        (*result) = (total_flops <= floplimit) ;
    }
    else
    {

        //----------------------------------------------------------------------
        // cumulative sum of Bflops and Bflops_per_entry
        //----------------------------------------------------------------------

        GB_cumsum (Flops, ntasks, NULL, 1) ;
        // note: this shadows the outer total_flops, which was not accumulated
        // in this branch; the per-task sums in Flops are authoritative here
        int64_t total_flops = Flops [ntasks] ;
        (*result) = (total_flops <= floplimit) ;

        if (Bflops != NULL)
        {

            //------------------------------------------------------------------
            // reduce the first and last vector of each slice
            //------------------------------------------------------------------

            // See also Template/GB_reduce_each_vector.c

            int64_t kprior = -1 ;

            for (int tid = 0 ; tid < ntasks ; tid++)
            {

                //--------------------------------------------------------------
                // sum up the partial flops that task tid computed for kfirst
                //--------------------------------------------------------------

                int64_t kfirst = kfirst_slice [tid] ;
                int64_t klast = klast_slice [tid] ;

                if (kfirst <= klast)
                {
                    int64_t pB = pstart_slice [tid] ;
                    int64_t pB_end =
                        GB_IMIN (Bp [kfirst+1], pstart_slice [tid+1]) ;
                    if (pB < pB_end)
                    {
                        if (kprior < kfirst)
                        {
                            // This task is the first one that did work on
                            // B(:,kfirst), so use it to start the reduction.
                            Bflops [kfirst] = Wfirst [tid] ;
                        }
                        else
                        {
                            // subsequent task for B(:,kfirst)
                            Bflops [kfirst] += Wfirst [tid] ;
                        }
                        kprior = kfirst ;
                    }
                }

                //--------------------------------------------------------------
                // sum up the partial flops that task tid computed for klast
                //--------------------------------------------------------------

                if (kfirst < klast)
                {
                    int64_t pB = Bp [klast] ;
                    int64_t pB_end = pstart_slice [tid+1] ;
                    if (pB < pB_end)
                    {
                        /* if */ ASSERT (kprior < klast) ;
                        {
                            // This task is the first one that did work on
                            // B(:,klast), so use it to start the reduction.
                            Bflops [klast] = Wlast [tid] ;
                        }
                        /*
                        else
                        {
                            // If kfirst < klast and B(:,klast) is not empty,
                            // then this task is always the first one to do
                            // work on B(:,klast), so this case is never used.
                            ASSERT (GB_DEAD_CODE) ;
                            // subsequent task to work on B(:,klast)
                            Bflops [klast] += Wlast [tid] ;
                        }
                        */
                        kprior = klast ;
                    }
                }
            }

            //------------------------------------------------------------------
            // cumulative sum of Bflops
            //------------------------------------------------------------------

            // Bflops = cumsum ([0 Bflops]) ;
            ASSERT (Bflops [bnvec] == 0) ;
            GB_cumsum (Bflops, bnvec, NULL, nthreads) ;
            // Bflops [bnvec] is now the total flop count
            ASSERT (total_flops == Bflops [bnvec]) ;
        }

        if (Bflops_per_entry != NULL)
        {
            // Bflops_per_entry = cumsum ([0 Bflops_per_entry]) ;
            ASSERT (Bflops_per_entry [bnz] == 0) ;
            GB_cumsum (Bflops_per_entry, bnz, NULL, nthreads) ;
            // Bflops_per_entry [bnz] is now the total flop count
            ASSERT (total_flops == Bflops_per_entry [bnz]) ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
}
|
riemann.c | /*
Authors:
Takeshi I.
*/
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#define PI 3.14159265
/*
Define func as a function type that takes
a double and returns a double.
*/
typedef double (*func)(double);
/* Calculates the area under a curve */
double area(func, double, double, int);
double area_seq(func, double, double, int);
double square(double);
double trig_sin(double);
double trig_2_plus_sin(double);
/* Driver: times the parallel and the sequential LHS Riemann sum of x^2 on
 * [a, b] with n rectangles.
 *
 * Fix: the parallel run was timed with clock(), which measures CPU time
 * summed over all threads, and the result was then divided by the thread
 * count as a rough approximation of wall time.  omp_get_wtime() measures
 * true elapsed wall-clock time for both runs instead.
 * (Command-line parsing of a/b/n/threads existed only as commented-out
 * code and has been removed; defaults are used.) */
int main(int argc, char* argv[]) {
    (void)argc;
    (void)argv;
    int a = 0, b = 4, n = 500000000, threads = 4;

    printf("Riemann Sum LHS approximation for f(x) = x^2 using:\n");
    printf("start x: %d to ending x: %d, with %d rectangles (%d threads)\n", a, b, n, threads);

    omp_set_num_threads(threads);

    /* Parallel run, timed with wall-clock time */
    double start = omp_get_wtime();
    printf("Parallel: %f\n", area(square, a, b, n));
    double stop = omp_get_wtime();
    printf("\tParallel time: %f~\n", stop - start);

    /* Sequential run */
    double start_s = omp_get_wtime();
    printf("Serial: %f\n", area_seq(square, a, b, n));
    double stop_s = omp_get_wtime();
    printf("\tSerial time: %f\n", stop_s - start_s);

    return 0;
}
/* f(x) = x^2, computed by self-multiplication (equivalent to pow(x, 2)
 * but without a library call). */
double square(double x) {
    double value = x;
    return value * x;
}
/* f(x) = sin(x), with x given in degrees (converted to radians). */
double trig_sin(double x) {
    double radians = x * PI / 180;
    return sin(radians);
}
/* f(x) = 2 + sin(x), with x given in degrees (converted to radians). */
double trig_2_plus_sin(double x) {
    double s = sin(x * PI / 180.0);
    return 2.0 + s;
}
double sum = 0;
/* Left Riemann sum of f over [a, b] with n rectangles, computed in
 * parallel with an OpenMP reduction.
 *
 * Fixes two defects in the original:
 *  - the sample point x was a single variable shared by all threads, a
 *    data race that could pair one thread's x with another's f(x); x is
 *    now declared inside the loop, making it private per iteration.
 *  - the file-scope accumulator `sum` was never reset here, so a second
 *    call kept accumulating on top of the first result; it is now cleared
 *    before the reduction starts. */
double area(func f, double a, double b, int n) {
    double deltaX = fabs(b - a) / n;
    sum = 0;
    #pragma omp parallel for reduction(+: sum)
    for (int i = 0; i < n; i++) {
        double x = a + i * deltaX;   /* loop-local: no race between threads */
        sum += f(x) * deltaX;
    }
    return sum;
}
/* Left Riemann sum of f over [a, b] with n rectangles, sequential
 * reference version.  Resets and accumulates into the file-scope `sum`,
 * mirroring the parallel variant. */
double area_seq(func f, double a, double b, int n) {
    double width = fabs(b - a) / n;
    sum = 0;
    int i = 0;
    while (i < n) {
        sum += f(a + i * width) * width;
        i++;
    }
    return sum;
}
|
nstream.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
/**********************************************************************
NAME: nstream
PURPOSE: To compute memory bandwidth when adding a vector of a given
number of double precision values to the scalar multiple of
another vector of the same length, and storing the result in
a third vector.
USAGE: The program takes as input the number of threads, the number
of iterations to loop over the triad vectors, the length of the
vectors, and the offset between vectors
<progname> <# threads> <# iterations> <vector length> <offset>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
external functions are used in this program:
wtime()
bail_out()
checkTRIADresults()
NOTES: Bandwidth is determined as the number of words read, plus the
number of words written, times the size of the words, divided
by the execution time. For a vector length of N, the total
number of words read and written is 4*N*sizeof(double).
HISTORY: This code is loosely based on the Stream benchmark by John
McCalpin, but does not follow all the Stream rules. Hence,
reported results should not be associated with Stream in
external publications
REVISION: Modified by Tim Mattson to handle OpenMP correctly
REVISION: Modified by Rob Van der Wijngaart, December 2005, to
parameterize vector size and offsets through compiler flags.
Also removed all Stream cases except TRIAD.
REVISION: Modified by Rob Van der Wijngaart, May 2006, to introduce
dependence between successive triad operations. This is
necessary to avoid dead code elimination
**********************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
#define DEFAULTMAXLENGTH 2000000
#ifdef MAXLENGTH
#if MAXLENGTH > 0
#define N MAXLENGTH
#else
#define N DEFAULTMAXLENGTH
#endif
#else
#define N DEFAULTMAXLENGTH
#endif
#ifdef STATIC_ALLOCATION
/* use static to make sure it goes on the heap, not the stack */
static double a[N];
#else
static double * RESTRICT a;
#endif
static double * RESTRICT b;
static double * RESTRICT c;
#define SCALAR 3.0
static int checkTRIADresults(int, long int);
/* main: parse <# threads> <# iterations> <vector length> <offset>,
 * initialize vectors a, b, c (carved from one allocation, separated by
 * `offset` doubles), time `iterations` repetitions of the TRIAD update
 * a += b + scalar*c inside a single OpenMP parallel region, validate the
 * result, and report bandwidth.
 *
 * Fix: corrected the typo "Incvalid" in the offset error message. */
int main(int argc, char **argv)
{
  long     j, iter;       /* dummies                                       */
  double   scalar;        /* constant used in Triad operation              */
  int      iterations;    /* number of times vector loop gets repeated     */
  long int length,        /* total vector length                           */
           offset;        /* offset between vectors a and b, and b and c   */
  double   bytes;         /* memory IO size                                */
  size_t   space;         /* memory used for a single vector               */
  double   nstream_time,  /* timing parameters                             */
           avgtime;
  int      nthread_input; /* thread parameters                             */
  int      nthread;
  int      num_error=0;   /* flag that signals that requested and
                             obtained numbers of threads are the same      */

/**********************************************************************************
* process and test input parameters
***********************************************************************************/

  if (argc != 5){
     printf("Usage: %s <# threads> <# iterations> <vector length> <offset>\n", *argv);
     exit(EXIT_FAILURE);
  }

  nthread_input = atoi(*++argv);
  iterations = atoi(*++argv);
  length = atol(*++argv);
  offset = atol(*++argv);

  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }

  if ((iterations < 1)) {
    printf("ERROR: Invalid number of iterations: %d\n", iterations);
    exit(EXIT_FAILURE);
  }

  if (length < 0) {
    printf("ERROR: Invalid vector length: %ld\n", length);
    exit(EXIT_FAILURE);
  }

  if (offset < 0) {
    /* message typo fixed: was "Incvalid" */
    printf("ERROR: Invalid array offset: %ld\n", offset);
    exit(EXIT_FAILURE);
  }

#ifdef STATIC_ALLOCATION
  /* all three vectors plus the two gaps must fit in the static array */
  if ((3*length + 2*offset) > N) {
    printf("ERROR: vector length/offset %ld/%ld too ", length, offset);
    printf("large; increase MAXLENGTH in Makefile or decrease vector length\n");
    exit(EXIT_FAILURE);
  }
#endif

  omp_set_num_threads(nthread_input);

#ifndef STATIC_ALLOCATION
  /* one allocation holds a, b, c and the two inter-vector gaps */
  space = (3*length + 2*offset)*sizeof(double);
  a = (double *) malloc(space);
  if (!a) {
    printf("ERROR: Could not allocate %ld words for vectors\n",
           3*length+2*offset);
    exit(EXIT_FAILURE);
  }
#endif
  b = a + length + offset;
  c = b + length + offset;

  #pragma omp parallel private(j,iter)
  {

  #pragma omp master
  {
  nthread = omp_get_num_threads();
  printf("OpenMP stream triad: A = B + scalar*C\n");
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads    = %i;\n",nthread_input);
    printf("Vector length        = %ld\n", length);
    printf("Offset               = %ld\n", offset);
    printf("Number of iterations = %d\n", iterations);
  }
  }
  bail_out(num_error);

  /* initialize the vectors; parallel so pages are touched by their owners */
  #pragma omp for
  #pragma vector always
  for (j=0; j<length; j++) {
    a[j] = 0.0;
    b[j] = 2.0;
    c[j] = 2.0;
  }

  /* --- MAIN LOOP --- repeat Triad iterations times --- */

  scalar = SCALAR;

  for (iter=0; iter<=iterations; iter++) {

    /* iteration 0 is a warm-up; start the clock at the second pass */
    if (iter==1) {
      #pragma omp barrier
      #pragma omp master
      {
        nstream_time = wtime();
      }
    }

    #pragma omp for
    #pragma vector always
    for (j=0; j<length; j++) a[j] += b[j]+scalar*c[j];

  } /* end of iterations */

  #pragma omp barrier
  #pragma omp master
  {
    nstream_time = wtime() - nstream_time;
  }

  } /* end of OpenMP parallel region */

  /*********************************************************************
  ** Analyze and output results.
  *********************************************************************/

  /* each TRIAD pass reads 3*length words and writes length words */
  bytes = 4.0 * sizeof(double) * length;

  if (checkTRIADresults(iterations, length)) {
    avgtime = nstream_time/iterations;
    printf("Rate (MB/s): %lf Avg time (s): %lf\n",
           1.0E-06 * bytes/avgtime, avgtime);
  }
  else exit(EXIT_FAILURE);

  return 0;
}
/* checkTRIADresults: validate the TRIAD kernel output.
 * Replays the initialization and the iteration loop on scalar stand-ins to
 * obtain the value every element of a[] should hold, scales it by the
 * vector length, and compares against the actual checksum of a[].
 * Returns 1 when the relative error is within epsilon, 0 otherwise.
 *
 * Fix: the element-loop index was an int while `length` is long int, so
 * vectors longer than INT_MAX elements caused signed overflow (UB) and a
 * never-terminating comparison; the index is now long int.
 * NOTE(review): the relative-error test divides by asum, which is 0 for a
 * zero-length vector — confirm callers guarantee length > 0. */
int checkTRIADresults (int iterations, long int length) {
  double aj, bj, cj, scalar, asum;
  double epsilon = 1.e-8;
  long int j;               /* was int: overflowed when length > INT_MAX */
  int iter;

  /* reproduce initialization */
  aj = 0.0;
  bj = 2.0;
  cj = 2.0;

  /* now execute timing loop on scalar stand-ins */
  scalar = SCALAR;
  for (iter=0; iter<=iterations; iter++) {
    aj += bj+scalar*cj;
  }

  /* every element of a[] should equal aj, so scale by the length */
  aj = aj * (double) (length);

  asum = 0.0;
  for (j=0; j<length; j++) asum += a[j];

#ifdef VERBOSE
  printf ("Results Comparison: \n");
  printf (" Expected checksum: %f\n",aj);
  printf (" Observed checksum: %f\n",asum);
#endif

  if (ABS(aj-asum)/asum > epsilon) {
    printf ("Failed Validation on output array\n");
#ifndef VERBOSE
    printf (" Expected checksum: %f \n",aj);
    printf (" Observed checksum: %f \n",asum);
#endif
    return (0);
  }
  else {
    printf ("Solution validates\n");
    return (1);
  }
}
|
likelihoods.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_LIKELIHOODS_
#define GPB_LIKELIHOODS_
#define _USE_MATH_DEFINES // for M_SQRT1_2 and M_PI
#include <cmath>
#include <GPBoost/type_defs.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <string>
#include <set>
#include <string>
#include <vector>
#include <cmath>
#include <LightGBM/utils/log.h>
using LightGBM::Log;
//Mathematical constants usually defined in cmath
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif
//sqrt(2)
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif
//1/sqrt(2)
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524401
#endif
//2/sqrt(pi)
#ifndef M_2_SQRTPI
#define M_2_SQRTPI 1.12837916709551257390
#endif
#include <chrono> // only for debugging
#include <thread> // only for debugging
//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
//std::chrono::steady_clock::time_point begin, end;// only for debugging
//double el_time;
//end = std::chrono::steady_clock::now();// only for debugging
//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
//Log::REInfo("TOTAL TIME for mode calculation: %g", el_time);// Only for debugging
namespace GPBoost {
/*!
* \brief This class implements the likelihoods for the Gaussian proceses
* The template parameters <T_mat, T_chol> can be either <den_mat_t, chol_den_mat_t> or <sp_mat_t, chol_sp_mat_t>
*/
template<typename T_mat, typename T_chol>
class Likelihood {
public:
/*! \brief Constructor */
Likelihood();
/*!
* \brief Constructor
* \param type Type of likelihood (aliases are resolved via ParseLikelihoodAlias)
* \param num_data Number of data points
* \param num_re Number of random effects
*/
Likelihood(string_t type,
	data_size_t num_data,
	data_size_t num_re) {
	const string_t lik = ParseLikelihoodAlias(type);
	const bool supported = (SUPPORTED_LIKELIHOODS_.find(lik) != SUPPORTED_LIKELIHOODS_.end());
	if (!supported) {
		Log::REFatal("Likelihood of type '%s' is not supported.", lik.c_str());
	}
	num_data_ = num_data;
	num_re_ = num_re;
	likelihood_type_ = lik;
	if (lik == "gamma") {
		//shape parameter fixed at 1 for now, TODO: also estimate this parameter
		aux_pars_ = { 1. };
	}
	chol_fact_pattern_analyzed_ = false;
}
/*!
* \brief Initialize mode vector_ (used in Laplace approximation for non-Gaussian data)
*/
void InitializeModeAvec() {
mode_ = vec_t::Zero(num_re_);
mode_previous_value_ = vec_t::Zero(num_re_);
mode_initialized_ = true;
first_deriv_ll_ = vec_t(num_data_);
second_deriv_neg_ll_ = vec_t(num_data_);
}
/*!
* \brief Reset mode to previous value. This is used if too large step-sizes are done which result in increases in the objective function.
* The values (covariance parameters and linear coefficients) are then discarded and consequently the mode should also be reset to the previous value
*/
void ResetModeToPreviousValue() {
	CHECK(mode_initialized_);//mode_previous_value_ only exists after InitializeModeAvec()
	mode_ = mode_previous_value_;
}
/*! \brief Destructor (no resources to release; members clean up themselves) */
~Likelihood() {
}
/*!
* \brief Returns the type of likelihood
* \return Canonical (alias-resolved) name of the currently set likelihood
*/
string_t GetLikelihood() const {
	return(likelihood_type_);
}
/*!
* \brief Set the type of likelihood
* \param type Likelihood name
*/
void SetLikelihood(const string_t& type) {
string_t likelihood = ParseLikelihoodAlias(type);
if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
}
likelihood_type_ = likelihood;
chol_fact_pattern_analyzed_ = false;
}
/*!
* \brief Returns the type of the response variable (label). Either "double" or "int"
*/
string_t label_type() const {
	const bool integer_label = (likelihood_type_ == "bernoulli_probit" ||
		likelihood_type_ == "bernoulli_logit" ||
		likelihood_type_ == "poisson");
	if (integer_label) {
		return(string_t("int"));
	}
	return(string_t("double"));
}
/*!
* \brief Checks whether the response variables (labels) have the correct values
* \param y_data Response variable data
* \param num_data Number of data points
*/
template <typename T>//T can be double or float
void CheckY(const T* y_data, const data_size_t num_data) const {
if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit") {
//#pragma omp parallel for schedule(static)//problematic with error message below...
for (data_size_t i = 0; i < num_data; ++i) {
if (fabs(y_data[i]) >= EPSILON_ && !AreSame<T>(y_data[i], 1.)) {
Log::REFatal("Response variable (label) data needs to be 0 or 1 for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
else if (likelihood_type_ == "poisson") {
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] < 0) {
Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
}
else {
double intpart;
if (std::modf(y_data[i], &intpart) != 0.0) {
Log::REFatal("Found non-integer response variable. Response variable can only be integer valued for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
}
else if (likelihood_type_ == "gamma") {
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] < 0) {
Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
}
/*!
* \brief Calculate the normalizing constant of the (log-)likelihood (the part that does not depend on the latent variable)
* \param y_data Response variable data
* \param num_data Number of data points
*/
template <typename T>//T can be double or int
void CalculateNormalizingConstant(const T* y_data, const data_size_t num_data) {
	if (likelihood_type_ == "poisson") {
		//sum_i log(y_i!), with log(y!) accumulated as log(2) + ... + log(y)
		double log_norm_const = 0.;
#pragma omp parallel for schedule(static) reduction(+:log_norm_const)
		for (data_size_t i = 0; i < num_data; ++i) {
			double log_factorial = 0.;
			for (int k = 2; k <= y_data[i]; ++k) {//no iterations when y_data[i] <= 1, i.e. log(0!) = log(1!) = 0
				log_factorial += std::log(k);
			}
			log_norm_const += log_factorial;
		}
		log_normalizing_constant_ = log_norm_const;
	}
	else if (likelihood_type_ == "gamma") {
		//Constant is zero as long as the shape parameter aux_pars_[0] == 1.
		//The '0. * y_data[0]' only avoids compiler warnings about an unreferenced parameter.
		log_normalizing_constant_ = 0. * y_data[0];
	}
	normalizing_constant_has_been_calculated_ = true;
}
/*!
* \brief Evaluate the log-likelihood conditional on the latent variable (=location_par)
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
* \return Log-likelihood summed over all data points (normalizing constant subtracted where applicable)
*/
double LogLikelihood(const double* y_data, const int* y_data_int,
	const double* location_par, const data_size_t num_data) {
	if (!normalizing_constant_has_been_calculated_) {
		Log::REFatal("The normalizing constant has not been calculated. Call 'CalculateNormalizingConstant' first.");
	}
	double log_lik = 0.;
	if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static) reduction(+:log_lik)
		for (data_size_t i = 0; i < num_data; ++i) {
			const double cdf = normalCDF(location_par[i]);
			log_lik += (y_data_int[i] == 0) ? std::log(1 - cdf) : std::log(cdf);
		}
	}
	else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static) reduction(+:log_lik)
		for (data_size_t i = 0; i < num_data; ++i) {
			//y*a - log(1+exp(a))
			//NOTE(review): std::exp(a) can overflow for very large a; a log1p-based stable form may be preferable — confirm intended input range
			log_lik += y_data_int[i] * location_par[i] - std::log(1 + std::exp(location_par[i]));
		}
	}
	else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static) reduction(+:log_lik)
		for (data_size_t i = 0; i < num_data; ++i) {
			//y*a - exp(a); the -log(y!) term is in log_normalizing_constant_
			log_lik += y_data_int[i] * location_par[i] - std::exp(location_par[i]);
		}
		log_lik -= log_normalizing_constant_;
	}
	else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static) reduction(+:log_lik)
		for (data_size_t i = 0; i < num_data; ++i) {
			//-shape * (a + y*exp(-a)) for log-link gamma with shape aux_pars_[0]
			log_lik += -aux_pars_[0] * (location_par[i] + y_data[i] * std::exp(-location_par[i]));
		}
		log_lik -= log_normalizing_constant_;
	}
	return(log_lik);
}
/*!
* \brief Calculate the first derivative of the log-likelihood with respect to the location parameter (result stored in first_deriv_ll_)
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
void CalcFirstDerivLogLik(const double* y_data, const int* y_data_int,
	const double* location_par, const data_size_t num_data) {
	if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			const double pdf = normalPDF(location_par[i]);
			if (y_data_int[i] == 0) {
				//d/da log(1-Phi(a)) = -phi(a)/(1-Phi(a))
				first_deriv_ll_[i] = -pdf / (1 - normalCDF(location_par[i]));
			}
			else {
				//d/da log(Phi(a)) = phi(a)/Phi(a)
				first_deriv_ll_[i] = pdf / normalCDF(location_par[i]);
			}
		}
	}
	else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			//y - logistic(a)
			first_deriv_ll_[i] = y_data_int[i] - CondMeanLikelihood(location_par[i]);
		}
	}
	else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			first_deriv_ll_[i] = y_data_int[i] - std::exp(location_par[i]);
		}
	}
	else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			first_deriv_ll_[i] = aux_pars_[0] * (y_data[i] * std::exp(-location_par[i]) - 1.);
		}
	}
}
/*!
* \brief Calculate the second derivative of the negative (!) log-likelihood with respect to the location parameter (result stored in second_deriv_neg_ll_)
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
void CalcSecondDerivNegLogLik(const double* y_data, const int* y_data_int,
	const double* location_par, const data_size_t num_data) {
	if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			const double pdf = normalPDF(location_par[i]);
			const double cdf = normalCDF(location_par[i]);
			if (y_data_int[i] == 0) {
				//ratio = phi/(1-Phi); -d2/da2 log(1-Phi(a)) = -ratio*(a - ratio)
				const double ratio = pdf / (1. - cdf);
				second_deriv_neg_ll_[i] = -ratio * (location_par[i] - ratio);
			}
			else {
				//ratio = phi/Phi; -d2/da2 log(Phi(a)) = ratio*(a + ratio)
				const double ratio = pdf / cdf;
				second_deriv_neg_ll_[i] = ratio * (location_par[i] + ratio);
			}
		}
	}
	else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			//exp(a)/(1+exp(a))^2
			const double e = std::exp(location_par[i]);
			second_deriv_neg_ll_[i] = e * std::pow(1. + e, -2);
		}
	}
	else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			second_deriv_neg_ll_[i] = std::exp(location_par[i]);
		}
	}
	else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			second_deriv_neg_ll_[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
		}
	}
}
/*!
* \brief Calculate the third derivative of the log-likelihood with respect to the location parameter
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
* \param[out] third_deriv Third derivative of the log-likelihood with respect to the location parameter. Need to pre-allocate memory of size num_data
*/
void CalcThirdDerivLogLik(const double* y_data, const int* y_data_int,
	const double* location_par, const data_size_t num_data, double* third_deriv) {
	if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			const double pdf = normalPDF(location_par[i]);
			const double cdf = normalCDF(location_par[i]);
			const double a = location_par[i];
			if (y_data_int[i] == 0) {
				const double ratio = pdf / (1. - cdf);//phi/(1-Phi)
				third_deriv[i] = ratio * (1 - a * a +
					ratio * (3 * a - 2 * ratio));
			}
			else {
				const double ratio = pdf / cdf;//phi/Phi
				third_deriv[i] = ratio * (a * a - 1 +
					ratio * (3 * a + 2 * ratio));
			}
		}
	}
	else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			//-exp(a)*(1-exp(a))/(1+exp(a))^3
			const double e = std::exp(location_par[i]);
			third_deriv[i] = -e * (1. - e) * std::pow(1 + e, -3);
		}
	}
	else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			third_deriv[i] = -std::exp(location_par[i]);
		}
	}
	else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			third_deriv[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
		}
	}
}
/*!
* \brief Calculate the mean of the likelihood conditional on the (predicted) latent variable (inverse link function)
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double CondMeanLikelihood(const double value) const {
	if (likelihood_type_ == "gaussian") {
		return value;//identity link
	}
	if (likelihood_type_ == "bernoulli_probit") {
		return normalCDF(value);
	}
	if (likelihood_type_ == "bernoulli_logit") {
		return 1. / (1. + std::exp(-value));//logistic function
	}
	if (likelihood_type_ == "poisson" || likelihood_type_ == "gamma") {
		return std::exp(value);//log link
	}
	Log::REFatal("CondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
	return 0.;
}
/*!
* \brief Calculate the first derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double FirstDerivLogCondMeanLikelihood(const double value) const {
	if (likelihood_type_ == "bernoulli_logit") {
		return 1. / (1. + std::exp(value));
	}
	if (likelihood_type_ == "poisson" || likelihood_type_ == "gamma") {
		return 1.;//log link: d/da log(exp(a)) = 1
	}
	Log::REFatal("FirstDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
	return 0.;
}
/*!
* \brief Calculate the second derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double SecondDerivLogCondMeanLikelihood(const double value) const {
	if (likelihood_type_ == "bernoulli_logit") {
		const double exp_x = std::exp(value);
		return -exp_x / ((1. + exp_x) * (1. + exp_x));
	}
	if (likelihood_type_ == "poisson" || likelihood_type_ == "gamma") {
		return 0.;//log link: second derivative of the identity is zero
	}
	Log::REFatal("SecondDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
	return 0.;
}
/*!
* \brief Do Cholesky decomposition
* \param[out] chol_fact Cholesky factor
* \param psi Matrix for which the Cholesky decomposition should be done
*/
//Sparse overload: the symbolic analysis (sparsity pattern) is done only once and
//reused across calls, since the pattern does not change between factorizations
//(chol_fact_pattern_analyzed_ is reset when the likelihood type changes).
template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type * = nullptr >
void CalcChol(T_chol& chol_fact, const T_mat_1& psi) {
	if (!chol_fact_pattern_analyzed_) {
		chol_fact.analyzePattern(psi);//symbolic factorization (fill-reducing ordering)
		chol_fact_pattern_analyzed_ = true;
	}
	chol_fact.factorize(psi);//numeric factorization only
}
//Dense overload: no symbolic phase exists; compute() does the full decomposition.
template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type * = nullptr >
void CalcChol(T_chol& chol_fact, const T_mat_1& psi) {
	chol_fact.compute(psi);
}
/*!
* \brief Apply permutation matrix of Cholesky factor (if it exists)
* \param chol_fact Cholesky factor
* \param M[out] Matrix to which the permutation is applied to
*/
//Sparse overload: sparse Cholesky may use a fill-reducing ordering P, so solves
//against the factor require M to be permuted accordingly first.
template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type * = nullptr >
void ApplyPermutationCholeskyFactor(const T_chol& chol_fact, T_mat_1& M) {
	if (chol_fact.permutationP().size() > 0) {//Apply permutation if an ordering is used
		M = chol_fact.permutationP() * M;
	}
}
//Dense overload: dense Cholesky uses no permutation, so this is a no-op.
template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type * = nullptr >
void ApplyPermutationCholeskyFactor(const T_chol&, T_mat_1&) {
}
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter (may be nullptr if there are none)
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLStable(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const std::shared_ptr<T_mat> ZSigmaZt,
	double& approx_marginal_ll) {
	// Initialize variables
	if (!mode_initialized_) {
		InitializeModeAvec();
	}
	else {
		mode_previous_value_ = mode_;//remember current mode so it can be restored if the new parameter step is rejected
	}
	bool no_fixed_effects = (fixed_effects == nullptr);
	vec_t location_par;//only allocated/used when there are fixed effects
	// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
	if (no_fixed_effects) {
		approx_marginal_ll = LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
	}
	else {
		location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[i] + fixed_effects[i];
		}
		approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
	}
	double approx_marginal_ll_new;
	vec_t rhs, v_aux;//auxiliary variables
	sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
	Wsqrt.setIdentity();
	T_mat Id(num_data, num_data);
	Id.setIdentity();
	T_mat Id_plus_Wsqrt_ZSigmaZt_Wsqrt;
	// Start finding mode (Newton iteration)
	int it;
	for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
		// Calculate first and second derivative of log-likelihood
		if (no_fixed_effects) {
			CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);
			CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
		}
		else {
			CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
			CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
		}
		// Calculate Cholesky factor of matrix B = Id + Wsqrt * Z*Sigma*Zt * Wsqrt
		Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
		Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
		CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
		// Update mode and a_vec_ (a = Sigma^-1 * mode, maintained implicitly for the quadratic term below)
		rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array();
		v_aux = Wsqrt * (*ZSigmaZt) * rhs;
		a_vec_ = rhs - Wsqrt * (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux));
		mode_ = (*ZSigmaZt) * a_vec_;
		// Calculate new objective function
		if (no_fixed_effects) {
			approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
		}
		else {
			// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
#pragma omp parallel for schedule(static)
			for (data_size_t i = 0; i < num_data; ++i) {
				location_par[i] = mode_[i] + fixed_effects[i];
			}
			approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
		}
		// Check relative convergence
		if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
			approx_marginal_ll = approx_marginal_ll_new;
			break;
		}
		else {
			approx_marginal_ll = approx_marginal_ll_new;
		}
	}
	if (it == MAXIT_MODE_NEWTON_) {
		Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
	}
	// Recompute derivatives and factorization at the final mode (reused in gradient calculation and prediction)
	if (no_fixed_effects) {
		CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
		CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
	}
	else {
		CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
		CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
	}
	Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
	Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
	CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
	// log-determinant term: 0.5*log|B| = sum(log(diag(L))) with B = L*L^T
	approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
	mode_has_been_calculated_ = true;
}//end FindModePostRandEffCalcMLLStable
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood.
* Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
* a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
* This version is used for the Laplace approximation when there is only one Gaussian process and
* there are a lot of multiple observations at the same location, i.e., the dimension of the random effects b is much smaller than Zb
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter (may be nullptr if there are none)
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const std::shared_ptr<T_mat> Sigma,
	const data_size_t * const random_effects_indices_of_data,
	double& approx_marginal_ll) {
	// Initialize variables
	if (!mode_initialized_) {
		InitializeModeAvec();
	}
	else {
		mode_previous_value_ = mode_;//remember current mode so it can be restored if the new parameter step is rejected
	}
	vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
	if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[random_effects_indices_of_data[i]];
		}
	}
	else {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
		}
	}
	// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
	approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
	double approx_marginal_ll_new;
	vec_t diag_sqrt_ZtWZ(num_re_);//sqrt of diagonal matrix ZtWZ (holds the plain diagonal until .sqrt() is applied below)
	T_mat Id(num_re_, num_re_);
	Id.setIdentity();
	T_mat Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt;
	vec_t rhs, v_aux;
	int it;
	for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
		// Calculate first and second derivative of log-likelihood
		CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
		CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
		// Calculate right hand side for mode update
		// Scatter-add of second derivatives into diag(ZtWZ), parallelized with per-thread accumulators merged in a critical section
		diag_sqrt_ZtWZ.setZero();
#pragma omp parallel
		{
			vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
			for (data_size_t i = 0; i < num_data; ++i) {
				diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
			}
#pragma omp critical
			{
				for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
					diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
				}
			}//end omp critical
		}//end omp parallel
		rhs = (diag_sqrt_ZtWZ.array() * mode_.array()).matrix();//rhs = ZtWZ * mode_ + Zt * first_deriv_ll_ for updating mode
		// Scatter-add of first derivatives into rhs (same per-thread accumulator pattern)
#pragma omp parallel
		{
			vec_t rhs_private = vec_t::Zero(num_re_);
#pragma omp for
			for (data_size_t i = 0; i < num_data; ++i) {
				rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
			}
#pragma omp critical
			{
				for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
					rhs[i_re] += rhs_private[i_re];
				}
			}//end omp critical
		}//end omp parallel
		// Calculate Cholesky factor of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt
		diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
		Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
		CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);//this is the bottleneck (for large data and sparse matrices)
		// Update mode and a_vec_ (a = Sigma^-1 * mode, maintained implicitly for the quadratic term below)
		v_aux = (*Sigma) * rhs;
		v_aux.array() *= diag_sqrt_ZtWZ.array();
		a_vec_ = -chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux);
		a_vec_.array() *= diag_sqrt_ZtWZ.array();
		a_vec_.array() += rhs.array();
		mode_ = (*Sigma) * a_vec_;
		// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
		if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
			for (data_size_t i = 0; i < num_data; ++i) {
				location_par[i] = mode_[random_effects_indices_of_data[i]];
			}
		}
		else {
#pragma omp parallel for schedule(static)
			for (data_size_t i = 0; i < num_data; ++i) {
				location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
			}
		}
		// Calculate new objective function
		approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
		// Check relative convergence
		if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
			approx_marginal_ll = approx_marginal_ll_new;
			break;
		}
		else {
			approx_marginal_ll = approx_marginal_ll_new;
		}
	}//end loop for finding mode
	if (it == MAXIT_MODE_NEWTON_) {
		Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
	}
	// Recompute derivatives and factorization at the final mode (reused in gradient calculation and prediction)
	CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
	CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
	diag_sqrt_ZtWZ.setZero();
#pragma omp parallel
	{
		vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
		for (data_size_t i = 0; i < num_data; ++i) {
			diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
		}
#pragma omp critical
		{
			for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
				diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
			}
		}//end omp critical
	}//end omp parallel
	diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
	Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
	CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);
	// log-determinant term: 0.5*log|B| = sum(log(diag(L))) with B = L*L^T
	approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
	mode_has_been_calculated_ = true;
}//end FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximative marginal log-likelihood.
* Calculations are done by directly factorizing ("inverting) (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
sp_mat_t Z = Zt.transpose();
vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
double approx_marginal_ll_new;
sp_mat_t SigmaI_plus_ZtWZ;
vec_t rhs;
// Start finding mode
int it;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
// Calculate Cholesky factor and update mode
rhs = Zt * first_deriv_ll_ - SigmaI * mode_;//right hand side for updating mode
SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
SigmaI_plus_ZtWZ.makeCompressed();
if (!chol_fact_pattern_analyzed_) {
chol_fact_SigmaI_plus_ZtWZ_grouped_.analyzePattern(SigmaI_plus_ZtWZ);
chol_fact_pattern_analyzed_ = true;
}
chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ);
mode_ += chol_fact_SigmaI_plus_ZtWZ_grouped_.solve(rhs);
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
location_par = Z * mode_;
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
// Calculate new objective function
approx_marginal_ll_new = -0.5 * (mode_.dot(SigmaI * mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}//end mode finding algorithm
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
}
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
SigmaI_plus_ZtWZ.makeCompressed();
chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ);
approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL()).diagonal().array().log().sum() + 0.5 * SigmaI.diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLGroupedRE");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//double approx_marginal_ll_1 = -0.5 * (mode_.dot(SigmaI * mode_));
//double approx_marginal_ll_2 = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
//double approx_marginal_ll_3 = 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() - 0.5 * SigmaI.diagonal().array().log().sum();
//Log::REInfo("approx_marginal_ll_1: %g", approx_marginal_ll_1);
//Log::REInfo("approx_marginal_ll_2: %g", approx_marginal_ll_2);
//Log::REInfo("approx_marginal_ll_3: %g", approx_marginal_ll_3);
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLGroupedRE
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const double sigma2,
	const data_size_t* const random_effects_indices_of_data,
	double& approx_marginal_ll) {
	// Initialize variables
	if (!mode_initialized_) {
		InitializeModeAvec();
	}
	else {
		mode_previous_value_ = mode_;// keep the previous mode (warm start for the next call)
	}
	vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
	// Note: since Sigma is diagonal here (one grouped RE), Z*mode reduces to an index lookup per data point
	if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[random_effects_indices_of_data[i]];
		}
	}
	else {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
		}
	}
	// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
	approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
	double approx_marginal_ll_new;
	vec_t rhs;
	diag_SigmaI_plus_ZtWZ_ = vec_t(num_re_);
	// Start finding mode
	int it;
	for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
		// Calculate first and second derivative of log-likelihood
		CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
		CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
		// Calculate rhs for mode update
		rhs = - mode_ / sigma2;//right hand side for updating mode
		// Add Z^T * first_deriv_ll_ to rhs; manual OpenMP reduction: each thread accumulates
		// into a private vector which is merged inside a critical section
#pragma omp parallel
		{
			vec_t rhs_private = vec_t::Zero(num_re_);
#pragma omp for
			for (data_size_t i = 0; i < num_data; ++i) {
				rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
			}
#pragma omp critical
			{
				for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
					rhs[i_re] += rhs_private[i_re];
				}
			}//end omp critical
		}//end omp parallel
		// Update mode
		// Accumulate the diagonal of Z^T W Z (same private-vector reduction pattern as above)
		diag_SigmaI_plus_ZtWZ_.setZero();
#pragma omp parallel
		{
			vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
			for (data_size_t i = 0; i < num_data; ++i) {
				diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
			}
#pragma omp critical
			{
				for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
					diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re];
				}
			}//end omp critical
		}//end omp parallel
		diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2;// add diagonal of Sigma^-1 (Sigma = sigma2 * I)
		// Newton step; (Sigma^-1 + Z^T W Z) is diagonal here, so the solve is elementwise division
		mode_ += (rhs.array() / diag_SigmaI_plus_ZtWZ_.array()).matrix();
		// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
		if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
			for (data_size_t i = 0; i < num_data; ++i) {
				location_par[i] = mode_[random_effects_indices_of_data[i]];
			}
		}
		else {
#pragma omp parallel for schedule(static)
			for (data_size_t i = 0; i < num_data; ++i) {
				location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
			}
		}
		// Calculate new objective function
		approx_marginal_ll_new = -0.5 / sigma2 * (mode_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
		// Convergence: relative change of the objective below DELTA_REL_CONV_
		if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
			approx_marginal_ll = approx_marginal_ll_new;
			break;
		}
		else {
			approx_marginal_ll = approx_marginal_ll_new;
		}
	}//end mode finding algorithm
	if (it == MAXIT_MODE_NEWTON_) {
		Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
	}
	CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
	CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
	// Recompute diag(Sigma^-1 + Z^T W Z) at the final mode for the log-determinant term
	diag_SigmaI_plus_ZtWZ_.setZero();
#pragma omp parallel
	{
		vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
		for (data_size_t i = 0; i < num_data; ++i) {
			diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
		}
#pragma omp critical
		{
			for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
				diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re];
			}
		}//end omp critical
	}//end omp parallel
	diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2;
	// Log-determinant terms: -0.5*log|Sigma^-1 + Z^T W Z| + 0.5*log|Sigma^-1| (both diagonal here)
	approx_marginal_ll -= 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() + 0.5 * num_re_ * std::log(sigma2);
	mode_has_been_calculated_ = true;
	////Only for debugging
	//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale");
	//Log::REInfo("Number of iterations: %d", it);
	//Log::REInfo("Mode");
	//for (int i = 0; i < 10; ++i) {
	//	Log::REInfo("mode_[%d]: %g", i, mode_[i]);
	//}
	//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
	//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W can be not very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLVecchia(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const sp_mat_t& B,
	const sp_mat_t& D_inv,
	double& approx_marginal_ll) {
	// Initialize variables
	if (!mode_initialized_) {
		InitializeModeAvec();
	}
	else {
		mode_previous_value_ = mode_;// keep the previous mode (warm start for the next call)
	}
	bool no_fixed_effects = (fixed_effects == nullptr);
	// Vecchia approximation: Sigma^-1 = B^T * D^-1 * B
	sp_mat_t SigmaI = B.transpose() * D_inv * B;
	vec_t location_par;//location parameter = mode of random effects + fixed effects
	// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
	// Note: with "Z = Id" here, the location parameter equals mode_ when there are no fixed effects
	if (no_fixed_effects) {
		approx_marginal_ll = LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
	}
	else {
		location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[i] + fixed_effects[i];
		}
		approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
	}
	double approx_marginal_ll_new;
	sp_mat_t SigmaI_plus_W;
	vec_t rhs, B_mode;
	// Start finding mode
	int it;
	for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
		// Calculate first and second derivative of log-likelihood
		if (no_fixed_effects) {
			CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);
			CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
		}
		else {
			CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
			CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
		}
		// Calculate Cholesky factor and update mode
		rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array();//right hand side for updating mode
		SigmaI_plus_W = SigmaI;
		SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array();// add W (diagonal of negative Hessian) to Sigma^-1
		SigmaI_plus_W.makeCompressed();
		//Calculation of the Cholesky factor is the bottleneck
		// The symbolic factorization pattern is computed once and reused afterwards
		if (!chol_fact_pattern_analyzed_) {
			chol_fact_SigmaI_plus_ZtWZ_vecchia_.analyzePattern(SigmaI_plus_W);
			chol_fact_pattern_analyzed_ = true;
		}
		chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W);//This is the bottleneck for large data
		//Log::REInfo("SigmaI_plus_W: number non zeros = %d", (int)SigmaI_plus_W.nonZeros());//only for debugging
		//Log::REInfo("chol_fact_SigmaI_plus_ZtWZ: Number non zeros = %d", (int)((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).nonZeros());//only for debugging
		// Newton update: unlike the incremental updates in sibling functions, the new mode is obtained directly from the solve
		mode_ = chol_fact_SigmaI_plus_ZtWZ_vecchia_.solve(rhs);
		// Calculate new objective function
		// Prior term uses the Vecchia form: mode^T * Sigma^-1 * mode = (B*mode)^T * D^-1 * (B*mode)
		B_mode = B * mode_;
		if (no_fixed_effects) {
			approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
		}
		else {
			// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
#pragma omp parallel for schedule(static)
			for (data_size_t i = 0; i < num_data; ++i) {
				location_par[i] = mode_[i] + fixed_effects[i];
			}
			approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
		}
		// Convergence: relative change of the objective below DELTA_REL_CONV_
		if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
			approx_marginal_ll = approx_marginal_ll_new;
			break;
		}
		else {
			approx_marginal_ll = approx_marginal_ll_new;
		}
	} // end loop for mode finding
	if (it == MAXIT_MODE_NEWTON_) {
		Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
	}
	if (no_fixed_effects) {
		CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
		CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
	}
	else {
		CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
		CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
	}
	// Re-factorize at the final mode for the log-determinant term
	SigmaI_plus_W = SigmaI;
	SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array();
	SigmaI_plus_W.makeCompressed();
	chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W);
	// Log-determinant terms: -log|L| + 0.5 * log|D^-1| (= 0.5 * log|Sigma^-1| in the Vecchia form)
	approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).diagonal().array().log().sum() + 0.5 * D_inv.diagonal().array().log().sum();
	mode_has_been_calculated_ = true;
	////Only for debugging
	//Log::REInfo("FindModePostRandEffCalcMLLVecchia");
	//Log::REInfo("Number of iterations: %d", it);
	//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
	//Log::REInfo("Mode");
	//for (int i = 0; i < 10; ++i) {
	//	Log::REInfo("mode_[%d]: %g", i, mode_[i]);
	//}
	//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLVecchia
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers to save memory in order to avoid passing a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxStable(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const std::shared_ptr<T_mat> ZSigmaZt,
	const std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
	bool calc_cov_grad,
	bool calc_F_grad,
	double* cov_grad,
	vec_t& fixed_effect_grad,
	bool calc_mode = false) {
	if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
		double mll;//approximate marginal likelihood. This is a by-product that is not used here.
		FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll);
	}
	else {
		CHECK(mode_has_been_calculated_);
	}
	// Initialize variables
	bool no_fixed_effects = (fixed_effects == nullptr);
	vec_t location_par;//location parameter = mode of random effects + fixed effects
	T_mat L_inv_Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
	L_inv_Wsqrt.setIdentity();
	L_inv_Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
	vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
	// Third derivatives are evaluated at the mode (plus fixed effects, if present); "Z = Id" here
	if (no_fixed_effects) {
		CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data());
	}
	else {
		location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[i] + fixed_effects[i];
		}
		CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
	}
	// Apply the factorization's permutation (if any) before the triangular solve, then overwrite
	// L_inv_Wsqrt in place: afterwards L_inv_Wsqrt = L \ Wsqrt
	ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_Wsqrt);
	chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_Wsqrt);//L_inv_Wsqrt = L\Wsqrt
	T_mat L_inv_Wsqrt_ZSigmaZt = L_inv_Wsqrt * (*ZSigmaZt);
	// calculate gradient wrt covariance parameters
	if (calc_cov_grad) {
		T_mat WI_plus_Sigma_inv = L_inv_Wsqrt.transpose() * L_inv_Wsqrt;//WI_plus_Sigma_inv = Wsqrt * L^T\(L\Wsqrt) = (W^-1 + Sigma)^-1
		// calculate gradient of approx. marginal log-likelihood wrt the mode
		// note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - L_inv_Wsqrt_ZSigmaZt^T*L_inv_Wsqrt_ZSigmaZt and (ii) "Z=Id"
		vec_t d_mll_d_mode = (-0.5 * ((*ZSigmaZt).diagonal() - ((T_mat)(L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt)).diagonal()).array() * third_deriv.array()).matrix();
		vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
		vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
		int par_count = 0;// running index into cov_grad across all components and their parameters
		double explicit_derivative;
		for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) {
			for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) {
				std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.);
				// calculate explicit derivative of approx. marginal log-likelihood
				explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) + 0.5 * (WI_plus_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum();
				// calculate implicit derivative (through mode) of approx. marginal log-likelihood
				v_aux = (*SigmaDeriv) * first_deriv_ll_;
				d_mode_d_par = (v_aux.array() - ((*ZSigmaZt) * WI_plus_Sigma_inv * v_aux).array()).matrix();
				// total derivative = explicit part + implicit part (chain rule through the mode)
				cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
				par_count++;
			}
		}
		////Only for debugging
		//Log::REInfo("explicit_derivative: %g", explicit_derivative);
		//for (int i = 0; i < 5; ++i) {
		//	Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
		//}
		//for (int i = 0; i < 5; ++i) {
		//	Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
		//}
		//Log::REInfo("cov_grad");
		//for (int i = 0; i < par_count; ++i) {
		//	Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
		//}
	}//end calc_cov_grad
	// calculate gradient wrt fixed effects
	if (calc_F_grad) {
		// Elementwise square used to obtain the diagonal of the product without forming it fully
		T_mat L_inv_Wsqrt_ZSigmaZt_sqr = L_inv_Wsqrt_ZSigmaZt.cwiseProduct(L_inv_Wsqrt_ZSigmaZt);
		vec_t ZSigmaZtI_plus_W_inv_diag = (*ZSigmaZt).diagonal() - L_inv_Wsqrt_ZSigmaZt_sqr.transpose() * vec_t::Ones(L_inv_Wsqrt_ZSigmaZt_sqr.rows());// diagonal of (ZSigmaZt^-1 + W) ^ -1
		vec_t d_mll_d_mode = (-0.5 * ZSigmaZtI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here
		vec_t L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode = L_inv_Wsqrt_ZSigmaZt * d_mll_d_mode;// for implicit derivative
		vec_t ZSigmaZtI_plus_W_inv_d_mll_d_mode = (*ZSigmaZt) * d_mll_d_mode - L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode;
		vec_t d_mll_d_F_implicit = (ZSigmaZtI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative
		fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode - d_mll_d_F_implicit;
	}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxStable
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
* a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
* This version is used for the Laplace approximation when there is only one Gaussian process and
* there are a lot of multiple observations at the same location, i.e., the dimension of the random effects b is much smaller than Zb
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers to save memory in order to avoid passing a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data,
	const int* y_data_int,
	const double* fixed_effects,
	const data_size_t num_data,
	const std::shared_ptr<T_mat> Sigma,
	const data_size_t* const random_effects_indices_of_data,
	const std::vector<std::shared_ptr<RECompBase<T_mat>>> & re_comps_cluster_i,
	bool calc_cov_grad,
	bool calc_F_grad,
	double* cov_grad,
	vec_t & fixed_effect_grad,
	bool calc_mode = false) {
	//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
	//std::chrono::steady_clock::time_point begin, end;// only for debugging
	//double el_time;
	CHECK(re_comps_cluster_i.size() == 1);// this version supports exactly one GP random effects component
	if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
		double mll;//approximate marginal likelihood. This is a by-product that is not used here.
		FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
			Sigma, random_effects_indices_of_data, mll);
	}
	else {
		CHECK(mode_has_been_calculated_);
	}
	// Initialize variables
	vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
	// Z*mode reduces to an index lookup since every data point maps to one random effect
	if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[random_effects_indices_of_data[i]];
		}
	}
	else {
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
		}
	}
	// Matrix ZtWZsqrt
	// Accumulate the diagonal of Z^T W Z; manual OpenMP reduction: each thread accumulates into
	// a private vector which is merged inside a critical section
	vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
	{
		// NOTE: despite the name, this holds the (non-sqrt) diagonal entries of Z^T W Z
		vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
		for (data_size_t i = 0; i < num_data; ++i) {
			diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
		}
#pragma omp critical
		{
			for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
				diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re];
			}
		}//end omp critical
	}//end omp parallel
	T_mat L_inv_ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ
	L_inv_ZtWZsqrt.setIdentity();
	L_inv_ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt();
	vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
	CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
	vec_t diag_ZtThirdDerivZ(num_re_);//diagonal of Z^T * diag(third_deriv) * Z
	diag_ZtThirdDerivZ.setZero();
	// Same private-vector reduction pattern as above, now for the third derivatives
#pragma omp parallel
	{
		vec_t diag_ZtThirdDerivZ_private = vec_t::Zero(num_re_);
#pragma omp for
		for (data_size_t i = 0; i < num_data; ++i) {
			diag_ZtThirdDerivZ_private[random_effects_indices_of_data[i]] += third_deriv[i];
		}
#pragma omp critical
		{
			for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
				diag_ZtThirdDerivZ[i_re] += diag_ZtThirdDerivZ_private[i_re];
			}
		}//end omp critical
	}//end omp parallel
	// Apply the factorization's permutation (if any), then overwrite in place:
	// afterwards L_inv_ZtWZsqrt = L \ ZtWZsqrt
	ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_ZtWZsqrt);
	chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_ZtWZsqrt);//L_inv_ZtWZsqrt = L\ZtWZsqrt //This is the bottleneck (in this first part) for large data when using sparse matrices
	T_mat L_inv_ZtWZsqrt_Sigma = L_inv_ZtWZsqrt * (*Sigma);
	////Only for debugging
	//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt));//Only for debugging
	//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt_Sigma: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt_Sigma));//Only for debugging
	// calculate gradient wrt covariance parameters
	if (calc_cov_grad) {
		vec_t ZtFirstDeriv(num_re_);//Z^T * first_deriv_ll_
		ZtFirstDeriv.setZero();
		// Same private-vector reduction pattern as above, now for the first derivatives
#pragma omp parallel
		{
			vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
			for (data_size_t i = 0; i < num_data; ++i) {
				ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
			}
#pragma omp critical
			{
				for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
					ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
				}
			}//end omp critical
		}//end omp parallel
		T_mat ZtWZI_Sigma_inv = L_inv_ZtWZsqrt.transpose() * L_inv_ZtWZsqrt;//ZtWZI_Sigma_inv = ZtWZsqrt * L^T\(L\ZtWZsqrt) = ((ZtWZ)^-1 + Sigma)^-1
		// calculate gradient of approx. marginal log-likelihood wrt the mode
		// note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - L_inv_ZtWZsqrt_Sigma^T*L_inv_ZtWZsqrt_Sigma
		vec_t d_mll_d_mode = (-0.5 * ((*Sigma).diagonal() - ((T_mat)(L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma)).diagonal()).array() * diag_ZtThirdDerivZ.array()).matrix();
		vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
		vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
		int par_count = 0;// running index into cov_grad across all components and their parameters
		double explicit_derivative;
		for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) {
			for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) {
				std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.);
				// calculate explicit derivative of approx. marginal log-likelihood
				explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) +
					0.5 * (ZtWZI_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum();
				// calculate implicit derivative (through mode) of approx. marginal log-likelihood
				v_aux = (*SigmaDeriv) * ZtFirstDeriv;
				d_mode_d_par = (v_aux.array() - ((*Sigma) * ZtWZI_Sigma_inv * v_aux).array()).matrix();
				// total derivative = explicit part + implicit part (chain rule through the mode)
				cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
				par_count++;
			}
		}
		////Only for debugging
		//Log::REInfo("explicit_derivative: %g", explicit_derivative);
		//for (int i = 0; i < 5; ++i) {
		//	Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
		//}
		//for (int i = 0; i < 5; ++i) {
		//	Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
		//}
		//Log::REInfo("cov_grad");
		//for (int i = 0; i < par_count; ++i) {
		//	Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
		//}
	}//end calc_cov_grad
	// calculate gradient wrt fixed effects
	if (calc_F_grad) {
		// Elementwise square used to obtain the diagonal of the product without forming it fully
		T_mat L_inv_ZtWZsqrt_Sigma_sqr = L_inv_ZtWZsqrt_Sigma.cwiseProduct(L_inv_ZtWZsqrt_Sigma);
		vec_t SigmaI_plus_ZtWZ_inv_diag = (*Sigma).diagonal() - L_inv_ZtWZsqrt_Sigma_sqr.transpose() * vec_t::Ones(L_inv_ZtWZsqrt_Sigma_sqr.rows());// diagonal of (Sigma^-1 + ZtWZ) ^ -1
		vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_ZtWZ_inv_diag.array() * diag_ZtThirdDerivZ.array()).matrix();// gradient of approx. marginal likelihood wrt the mode
		vec_t L_inv_ZtWZsqrt_Sigma_d_mll_d_mode = L_inv_ZtWZsqrt_Sigma * d_mll_d_mode;// for implicit derivative
		vec_t SigmaI_plus_ZtWZ_inv_d_mll_d_mode = (*Sigma) * d_mll_d_mode - L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma_d_mll_d_mode;
		fixed_effect_grad = -first_deriv_ll_;
		// Per-data-point gradient: explicit part (third derivative) plus implicit part mapped back from RE scale
#pragma omp parallel for schedule(static)
		for (data_size_t i = 0; i < num_data; ++i) {
			fixed_effect_grad[i] += -0.5 * third_deriv[i] * SigmaI_plus_ZtWZ_inv_diag[random_effects_indices_of_data[i]] -
				second_deriv_neg_ll_[i] * SigmaI_plus_ZtWZ_inv_d_mll_d_mode[random_effects_indices_of_data[i]];
		}
	}//end calc_F_grad
	//end = std::chrono::steady_clock::now();// only for debugging
	//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
	//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: TOTAL TIME: %g", el_time);// Only for debugging
}//end CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated
* \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
std::vector<data_size_t> cum_num_rand_eff_cluster_i,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
int num_REs = (int)SigmaI.cols();//number of random effect realizations
int num_comps = (int)cum_num_rand_eff_cluster_i.size() - 1;//number of different random effect components
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
sp_mat_t Z = Zt.transpose();
vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
// Calculate (Sigma^-1 + Zt*W*Z)^-1 via the Cholesky factor computed during mode finding
sp_mat_t L_inv(num_REs, num_REs);
L_inv.setIdentity();
if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when having an ordering
L_inv = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * L_inv;
}
chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(L_inv);
sp_mat_t SigmaI_plus_ZtWZ_inv = L_inv.transpose() * L_inv;
// calculate gradient of approx. marginal likelihood wrt the mode
//Note: the calculation of d_mll_d_mode is the bottleneck of this function (corresponding lines below are indicated with * and, in particular, **)
vec_t d_mll_d_mode(num_REs);
sp_mat_t Zt_third_deriv = Zt * third_deriv.asDiagonal();//every column of Z multiplied elementwise by third_deriv
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs; ++i) {
vec_t diag_d_W_d_mode_i = Zt_third_deriv.row(i);//*can be slow
//calculate Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
sp_mat_t Zt_d_W_d_mode_i_Z = (Zt * diag_d_W_d_mode_i.asDiagonal() * Z).pruned();//**can be very slow. Note that this is also slow when the middle diagonal matrix is a pruned sparse matrix
////Variant 2: slower
//sp_mat_t Zt_third_deriv_diag = sp_mat_t(((vec_t)Zt_third_deriv.row(i)).asDiagonal());
//sp_mat_t Zt_d_W_d_mode_i_Z = Zt * Zt_third_deriv_diag * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
////Variant 3: slower
//vec_t Z_i = Z.col(i);// column number i of Z
//vec_t diag_d_W_d_mode_i = (Z_i.array() * third_deriv.array()).matrix();//diagonal of derivative of matrix W wrt random effect number i
//sp_mat_t Zt_d_W_d_mode_i_Z = Zt * diag_d_W_d_mode_i.asDiagonal() * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z
// d_mll_d_mode[i] = -0.5 * tr((Sigma^-1 + Z^T*W*Z)^-1 * d(Z^T*W*Z)/d(mode_i)), computed as an elementwise product sum
d_mll_d_mode[i] = -0.5 * (Zt_d_W_d_mode_i_Z.cwiseProduct(SigmaI_plus_ZtWZ_inv)).sum();
}
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
sp_mat_t ZtWZ = Zt * second_deriv_neg_ll_.asDiagonal() * Z;
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
vec_t SigmaI_mode = SigmaI * mode_;
double explicit_derivative;
sp_mat_t I_j(num_REs, num_REs);//Diagonal matrix with 1 on the diagonal for all random effects of component j and 0's otherwise
sp_mat_t I_j_ZtWZ;
for (int j = 0; j < num_comps; ++j) {
// calculate explicit derivative of approx. marginal log-likelihood
std::vector<Triplet_t> triplets;//for constructing I_j
triplets.reserve(cum_num_rand_eff_cluster_i[j + 1] - cum_num_rand_eff_cluster_i[j]);
explicit_derivative = 0.;
// accumulate mode^T * Sigma^-1 * mode restricted to the random effects of component j
for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
triplets.emplace_back(i, i, 1.);
explicit_derivative += SigmaI_mode[i] * mode_[i];
}
// Alternative version using parallelization (not faster)
//#pragma omp parallel
//	{
//		std::vector<Triplet_t> triplets_private;
//		//triplets_private.reserve(cum_num_rand_eff_cluster_i[num_comps]);
//#pragma omp for nowait reduction(+:explicit_derivative)
//		for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
//			triplets_private.emplace_back(i, i, 1.);
//			explicit_derivative += SigmaI_mode[i] * mode_[i];
//		}
//#pragma omp critical
//		triplets.insert(triplets.end(), triplets_private.begin(), triplets_private.end());
//	}
//#pragma omp parallel for schedule(static) reduction(+:explicit_derivative)
//	for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
//		explicit_derivative += SigmaI_mode[i] * mode_[i];
//	}
explicit_derivative *= -0.5;
I_j.setFromTriplets(triplets.begin(), triplets.end());
I_j_ZtWZ = I_j * ZtWZ;
// trace term of the explicit derivative: 0.5 * tr((Sigma^-1 + Z^T*W*Z)^-1 * I_j * Z^T*W*Z)
explicit_derivative += 0.5 * (SigmaI_plus_ZtWZ_inv.cwiseProduct(I_j_ZtWZ)).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
d_mode_d_par = SigmaI_plus_ZtWZ_inv * I_j * Zt * first_deriv_ll_;
cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
}
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxGroupedRE");
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
//	Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
//	Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < num_comps; ++i) {
//	Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
vec_t d_detmll_d_F(num_data);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data; ++i) {
// outer product of row i of Z with itself; only entries for the random effects data point i belongs to are non-zero
sp_mat_t zi_zit = Zt.col(i) * Z.row(i);//=Z.row(i) * (Z.row(i)).transpose()
d_detmll_d_F[i] = -0.5 * third_deriv[i] * (SigmaI_plus_ZtWZ_inv.cwiseProduct(zi_zit)).sum();
}
// implicit derivative term: d_mll_d_mode^T * (Sigma^-1 + Z^T*W*Z)^-1 * Z^T * W
vec_t d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W = d_mll_d_mode.transpose() * SigmaI_plus_ZtWZ_inv * Zt * second_deriv_neg_ll_.asDiagonal();
fixed_effect_grad = -first_deriv_ll_ + d_detmll_d_F - d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxGroupedRE
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated
* \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
sigma2, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
// calculate gradient of approx. marginal likelihood wrt the mode
// Manual scatter-add reduction: thread-private accumulators merged in a critical section
// (Z^T * third_deriv, i.e., third derivatives aggregated per random effect)
vec_t d_mll_d_mode = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t third_deriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
third_deriv_private[random_effects_indices_of_data[i]] += third_deriv[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
d_mll_d_mode[i_re] += third_deriv_private[i_re];
}
}//end omp critical
}//end omp parallel
// d_mll_d_mode = -0.5 * (Z^T * third_deriv) / diag(Sigma^-1 + Z^T*W*Z)
d_mll_d_mode.array() /= -2. * diag_SigmaI_plus_ZtWZ_.array();
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
// diag(Z^T*W*Z): second derivatives of the negative log-likelihood aggregated per random effect
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ[i_re] += diag_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
// explicit derivative: -0.5 * mode^T*mode / sigma2 + 0.5 * tr((Sigma^-1 + Z^T*W*Z)^-1 * Z^T*W*Z)
double explicit_derivative = -0.5 * (mode_.array() * mode_.array()).sum() / sigma2 +
0.5 * (diag_ZtWZ.array() / diag_SigmaI_plus_ZtWZ_.array()).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
vec_t d_mode_d_par = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t first_deriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
d_mode_d_par[i_re] += first_deriv_private[i_re];
}
}//end omp critical
}//end omp parallel
// d_mode_d_par = (Sigma^-1 + Z^T*W*Z)^-1 * Z^T * first_deriv_ll_ (diagonal system, solved elementwise)
d_mode_d_par.array() /= diag_SigmaI_plus_ZtWZ_.array();
cov_grad[0] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
//	Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
//	Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad[0]: %g", cov_grad[0]);
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data; ++i) {
fixed_effect_grad[i] = -first_deriv_ll_[i] -
0.5 * third_deriv[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]] - //=d_detmll_d_F
d_mll_d_mode[random_effects_indices_of_data[i]] * second_deriv_neg_ll_[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]];//=implicit derivative = d_mll_d_mode * d_mode_d_F
}
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
//for (int i = 0; i < 5; ++i) {
//	Log::REInfo("fixed_effect_grad[%d]: %g", i, fixed_effect_grad[i]);
//}
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W can be not very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param B_grad Derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation
* \param D_grad Derivatives of matrices D for Vecchia approximation
* \param calc_cov_grad If true, the gradient wrt the covariance parameters are calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F are calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as a Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
const std::vector<sp_mat_t>& B_grad,
const std::vector<sp_mat_t>& D_grad,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
bool no_fixed_effects = (fixed_effects == nullptr);
vec_t location_par;//location parameter = mode of random effects + fixed effects
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
if (no_fixed_effects) {
// location parameter equals the mode; avoid allocating location_par
CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data());
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
}
// Calculate (Sigma^-1 + W)^-1 via the Cholesky factor computed during mode finding
sp_mat_t L_inv(num_data, num_data);
L_inv.setIdentity();
if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when having an ordering
L_inv = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * L_inv;
}
chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(L_inv);
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
sp_mat_t SigmaI_plus_W_inv = L_inv.transpose() * L_inv;//Note: this is the computational bottleneck for large data
vec_t d_mll_d_mode = -0.5 * (SigmaI_plus_W_inv.diagonal().array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
double explicit_derivative;
int num_par = (int)B_grad.size();
sp_mat_t SigmaI_deriv;
sp_mat_t BgradT_Dinv_B;
sp_mat_t Bt_Dinv_Bgrad;
for (int j = 0; j < num_par; ++j) {
// d(Sigma^-1)/d(theta_j) = dB^T*D^-1*B + B^T*D^-1*dB - B^T*D^-1*dD*D^-1*B (product rule for Sigma^-1 = B^T*D^-1*B)
SigmaI_deriv = B_grad[j].transpose() * D_inv * B;
Bt_Dinv_Bgrad = SigmaI_deriv.transpose();
SigmaI_deriv += Bt_Dinv_Bgrad - B.transpose() * D_inv * D_grad[j] * D_inv * B;
d_mode_d_par = -SigmaI_plus_W_inv * SigmaI_deriv * mode_;
explicit_derivative = 0.5 * mode_.dot(SigmaI_deriv * mode_) +
0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
// Alternative version (not faster)
//vec_t u = D_inv * B * mode_;
//vec_t uk = B_grad[j] * mode_;
//explicit_derivative = uk.dot(u) - 0.5 * u.dot(D_grad[j] * u) +
//	0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
//	Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
//	Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < num_par; ++i) {
//	Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
// diag((Sigma^-1 + W)^-1) computed from the squared entries of L^-1 (avoids forming the full inverse)
sp_mat_t L_inv_sqr = L_inv.cwiseProduct(L_inv);
vec_t SigmaI_plus_W_inv_diag = L_inv_sqr.transpose() * vec_t::Ones(L_inv_sqr.rows());// diagonal of (Sigma^-1 + W) ^ -1
vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here
vec_t L_inv_d_mll_d_mode = L_inv * d_mll_d_mode;// for implicit derivative
vec_t SigmaI_plus_W_inv_d_mll_d_mode = L_inv.transpose() * L_inv_d_mll_d_mode;
vec_t d_mll_d_F_implicit = -(SigmaI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative
fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode + d_mll_d_F_implicit;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxVecchia
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param pred_mean[out] Predicted mean
* \param pred_cov[out] Predicted covariance matrix
* \param pred_var[out] Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxStable(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> ZSigmaZt,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
Wsqrt.setIdentity();
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
T_mat Maux = Wsqrt * Cross_Cov.transpose();
ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux);
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux);
if (calc_pred_cov) {
pred_cov -= Maux.transpose() * Maux;
}
if (calc_pred_var) {
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] -= Maux.col(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxStable");
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]);
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxStable
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param pred_mean[out] Predicted mean
* \param pred_cov[out] Predicted covariance matrix
* \param pred_var[out] Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> Sigma,
const data_size_t* const random_effects_indices_of_data,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects,
num_data, Sigma, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Aggregate first derivatives of the log-likelihood per random effect:
// ZtFirstDeriv = Z^T * first_deriv_ll_ (manual OpenMP scatter-add reduction)
vec_t ZtFirstDeriv = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
// Predictive mean
pred_mean = Cross_Cov * ZtFirstDeriv;
if (calc_pred_cov || calc_pred_var) {
// diag(Z^T*W*Z): second derivatives of the negative log-likelihood aggregated per random effect
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);// note: accumulates W entries (not their square roots)
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
sp_mat_t ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ
ZtWZsqrt.setIdentity();
ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt();
T_mat Maux = ZtWZsqrt * Cross_Cov.transpose();
ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux);
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux);//Maux = L\(ZtWZsqrt * Cross_Cov^T)
if (calc_pred_cov) {
pred_cov -= Maux.transpose() * Maux;
}
if (calc_pred_var) {
// Only the diagonal of Maux^T * Maux is needed: column sums of the squared entries
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] -= Maux.col(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxOnlyOneGPCalculationsOnREScale");
//for (int i = 0; i < 3; ++i) {
//	if (Cross_Cov.rows() > 1) {
//		Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
//	}
//	else {
//		Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
//	}
//}
//for (int i = 0; i < 3; ++i) {
//	Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) {
//	Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
//	for (int i = 0; i < 3; ++i) {
//		Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
//	}
//}
}//end PredictLAApproxOnlyOneGPCalculationsOnREScale
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param pred_mean[out] Predicted mean
* \param pred_cov[out] Predicted covariance matrix
* \param pred_var[out] Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
// calculate Maux = L\(Z^T * second_deriv_neg_ll_.asDiagonal() * Cross_Cov^T)
T_mat Maux = Zt * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose();
if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when having an ordering
Maux = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * Maux;
}
chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(Maux);
if (calc_pred_cov) {
pred_cov += Maux.transpose() * Maux - (T_mat)(Cross_Cov * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose());
}
if (calc_pred_var) {
T_mat Maux3 = Cross_Cov.cwiseProduct(Cross_Cov * second_deriv_neg_ll_.asDiagonal());
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux.col(i).sum() - Maux3.row(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxGroupedRE");
//for (int i = 0; i < 3; ++i) {
// if (Cross_Cov.rows() > 1) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
// }
// else {
// Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
// }
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(), 3); ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxGroupedRE
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param pred_mean[out] Predicted mean
* \param pred_cov[out] Predicted covariance matrix
* \param pred_var[out] Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
sigma2, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Aggregate first derivatives of the log-likelihood per random effect:
// ZtFirstDeriv = Z^T * first_deriv_ll_ (manual OpenMP scatter-add reduction)
vec_t ZtFirstDeriv = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
// Predictive mean
pred_mean = Cross_Cov * ZtFirstDeriv;
// Calculate ((sigma2 * Sigma^-1 + sigma2 * Z^T*W*Z)^-1 - Id) / sigma2 elementwise on the diagonal
// (both Sigma^-1 and Z^T*W*Z are diagonal here)
vec_t diag_Sigma_plus_ZtWZI = vec_t(num_re_);
diag_Sigma_plus_ZtWZI.array() = 1. / diag_SigmaI_plus_ZtWZ_.array();
diag_Sigma_plus_ZtWZI.array() /= sigma2;
diag_Sigma_plus_ZtWZI.array() -= 1.;
diag_Sigma_plus_ZtWZI.array() /= sigma2;
if (calc_pred_cov) {
T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal() * Cross_Cov.transpose();
pred_cov += Maux;
}
if (calc_pred_var) {
// Diagonal-only version of the covariance update above: row sums of the elementwise product
T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal();
T_mat Maux2 = Cross_Cov.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux2.row(i).sum();
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxOnlyOneGroupedRECalculationsOnREScale");
//for (int i = 0; i < 3; ++i) {
//	if (Cross_Cov.rows() > 1) {
//		Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
//	}
//	else {
//		Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
//	}
//}
//for (int i = 0; i < 3; ++i) {
//	Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) {
//	Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
//	for (int i = 0; i < 3; ++i) {
//		Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
//	}
//}
}//end PredictLAApproxOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W can be not very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param pred_mean[out] Predicted mean
* \param pred_cov[out] Predicted covariance matrix
* \param pred_var[out] Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Predicted mean: Cov(y_p, y) * dlog p(y | mode)
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
// Sigma^-1 * Cross_Cov^T using the Vecchia factorization Sigma^-1 = B^T * D^-1 * B
T_mat SigmaI_CrossCovT = B.transpose() * D_inv * B * Cross_Cov.transpose();
T_mat Maux = SigmaI_CrossCovT; //Maux = L\(Sigma^-1 * Cross_Cov^T), L = Chol(Sigma^-1 + W)
// Apply the Cholesky fill-reducing permutation first (when present), then the triangular solve
if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when having an ordering
Maux = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * Maux;
}
chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(Maux);
if (calc_pred_cov) {
// Correction: -Cross_Cov * Sigma^-1 * Cross_Cov^T + (L\X)^T * (L\X), X = Sigma^-1 * Cross_Cov^T
pred_cov += -Cross_Cov * SigmaI_CrossCovT + Maux.transpose() * Maux;
}
if (calc_pred_var) {
// Only diagonals are needed: column sums of squared entries give diag(Maux^T * Maux)
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux.col(i).sum() - (Cross_Cov.row(i)).dot(SigmaI_CrossCovT.col(i));
}
}
}
}//end PredictLAApproxVecchia
/*!
* \brief Make predictions for the response variable (label) based on predictions for the mean and variance of the latent random effects
* \param pred_mean[out] Predicted mean of latent random effects. The predicted mean for the response variables is written on this
* \param pred_var[out] Predicted variances of latent random effects. The predicted variance for the response variables is written on this
* \param predict_var If true, predictive variances are also calculated
*/
// Transform predictive mean/variance of the latent random effects into predictive
// mean/variance of the response variable, in place, according to likelihood_type_.
// For a "gaussian" likelihood nothing is changed (identity link).
void PredictResponse(vec_t& pred_mean, vec_t& pred_var, bool predict_var = false) {
const int num_pred = (int)pred_mean.size();
const bool probit = (likelihood_type_ == "bernoulli_probit");
const bool logit = (likelihood_type_ == "bernoulli_logit");
if (probit || logit) {
// Bernoulli: mean = P(y=1); probit has a closed form, logit uses adaptive GH quadrature
#pragma omp parallel for schedule(static)
for (int k = 0; k < num_pred; ++k) {
pred_mean[k] = probit ? normalCDF(pred_mean[k] / std::sqrt(1. + pred_var[k]))
: RespMeanAdaptiveGHQuadrature(pred_mean[k], pred_var[k]);
}
if (predict_var) {
// Bernoulli variance: p * (1 - p)
#pragma omp parallel for schedule(static)
for (int k = 0; k < num_pred; ++k) {
pred_var[k] = pred_mean[k] * (1. - pred_mean[k]);
}
}
}
else if (likelihood_type_ == "poisson" || likelihood_type_ == "gamma") {
const bool gamma = (likelihood_type_ == "gamma");
#pragma omp parallel for schedule(static)
for (int k = 0; k < num_pred; ++k) {
// First moment of the response via adaptive GH quadrature
double mean_k = RespMeanAdaptiveGHQuadrature(pred_mean[k], pred_var[k]);
if (predict_var) {
// E[exp(2b)]-type term obtained by doubling the latent mean and quadrupling the latent variance
double sec_mom = RespMeanAdaptiveGHQuadrature(2 * pred_mean[k], 4 * pred_var[k]);
pred_var[k] = gamma ? (sec_mom - mean_k * mean_k + sec_mom / aux_pars_[0])
: (sec_mom - mean_k * mean_k + mean_k);
}
pred_mean[k] = mean_k;
}
}
}
/*!
* \brief Adaptive Gauss-Hermite quadrature for the predictive mean of the response variable,
*        i.e., E[mu(b)] for b ~ N(latent_mean, latent_var) with mu the conditional mean function
* \param latent_mean Predicted mean of latent random effects
* \param latent_var Predicted variance of latent random effects
* \return Approximate predictive mean of the response variable
*/
double RespMeanAdaptiveGHQuadrature(const double latent_mean, const double latent_var) {
// Find mode of integrand with Newton's method
double mode_integrand, mode_integrand_last, update;
mode_integrand = 0.;
double sigma2_inv = 1. / latent_var;
double sqrt_sigma2_inv = std::sqrt(sigma2_inv);
for (int it = 0; it < 100; ++it) {
mode_integrand_last = mode_integrand;
update = (FirstDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv * (mode_integrand - latent_mean))
/ (SecondDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv);
mode_integrand -= update;
// Relative convergence check in multiplicative form. This makes exactly the same
// decision as 'std::abs(update) / std::abs(mode_integrand_last) < DELTA_REL_CONV_'
// but avoids the division by zero (inf / NaN) that occurred on the first iteration
// where mode_integrand_last == 0.
if (std::abs(update) < DELTA_REL_CONV_ * std::abs(mode_integrand_last)) {
break;
}
}
// Adaptive GH quadrature: nodes are rescaled by the curvature at the mode and shifted to the mode
double sqrt2_sigma_hat = M_SQRT2 / std::sqrt(-SecondDerivLogCondMeanLikelihood(mode_integrand) + sigma2_inv);
double x_val;
double mean_resp = 0.;
for (int j = 0; j < order_GH_; ++j) {
x_val = sqrt2_sigma_hat * GH_nodes_[j] + mode_integrand;
mean_resp += adaptive_GH_weights_[j] * CondMeanLikelihood(x_val) * normalPDF(sqrt_sigma2_inv * (x_val - latent_mean));
}
mean_resp *= sqrt2_sigma_hat * sqrt_sigma2_inv;
return mean_resp;
}
template <typename T>//T can be double or float
bool AreSame(const T a, const T b) const {
return fabs(a - b) < a * EPSILON_;
}
// Standard normal CDF Phi(value); used for likelihood_type_ == "bernoulli_probit"
inline double normalCDF(double value) const {
// Phi(x) = erfc(-x / sqrt(2)) / 2
return std::erfc(-value * M_SQRT1_2) / 2.;
}
// Standard normal density phi(value) = exp(-value^2 / 2) / sqrt(2 * pi)
inline double normalPDF(double value) const {
const double neg_half_sq = -value * value / 2;
return std::exp(neg_half_sq) / M_SQRT2PI_;
}
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Number (dimension) of random effects */
data_size_t num_re_;
/*! \brief Posterior mode used for Laplace approximation */
vec_t mode_;
/*! \brief Posterior mode used for Laplace approximation: saving a previously found value allows for reseting the mode when having a too large step size. */
vec_t mode_previous_value_;
/*! \brief Auxiliary variable a=ZSigmaZt^-1 mode_b used for Laplace approximation */
vec_t a_vec_;
/*! \brief First derivatives of the log-likelihood */
vec_t first_deriv_ll_;
/*! \brief Second derivatives of the negative log-likelihood (diagonal of matrix "W") */
vec_t second_deriv_neg_ll_;
/*! \brief Diagonal of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'GroupedRE' when there is only one random effect and ZtWZ is diagonal. Otherwise 'diag_SigmaI_plus_ZtWZ_' is used for grouped REs) */
vec_t diag_SigmaI_plus_ZtWZ_;
/*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version'GroupedRE' if there is more than one random effect). */
chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_grouped_;
/*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'Vecchia') */
chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_vecchia_;
//Note: chol_sp_mat_AMDOrder_t (AMD permutation) is faster than chol_sp_mat_t (no permutation) for the Vecchia approcimation but for the grouped random effects the difference is small.
// chol_sp_mat_COLAMDOrder_t is slower than no ordering or chol_sp_mat_AMDOrder_t for both grouped random effects and the Vecchia approximation
/*!
* \brief Cholesky factors of matrix B = I + Wsqrt * Z * Sigma * Zt * Wsqrt in Laplace approximation (for version 'Stable')
* or of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt (for version 'OnlyOneGPCalculationsOnREScale')
*/
T_chol chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_;
/*! \brief If true, the pattern for the Cholesky factor (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, chol_fact_SigmaI_plus_ZtWZ_grouped_, or chol_fact_SigmaI_plus_ZtWZ_vecchia_) has been analyzed */
bool chol_fact_pattern_analyzed_ = false;
/*! \brief If true, the mode has been initialized to 0 */
bool mode_initialized_ = false;
/*! \brief If true, the mode has been determined */
bool mode_has_been_calculated_ = false;
/*! \brief If true, the function 'CheckY' has been called */
bool normalizing_constant_has_been_calculated_ = false;
/*! \brief Normalizing constant for likelihoods (not all likelihoods have one) */
double log_normalizing_constant_;
/*! \brief Type of likelihood */
string_t likelihood_type_ = "gaussian";
/*! \brief List of supported covariance likelihoods */
const std::set<string_t> SUPPORTED_LIKELIHOODS_{ "gaussian", "bernoulli_probit", "bernoulli_logit", "poisson", "gamma" };
/*! \brief Tolerance level when comparing two doubles for equality */
double EPSILON_ = 1e-6;
/*! \brief Maximal number of iteration done for finding posterior mode with Newton's method */
int MAXIT_MODE_NEWTON_ = 1000;
/*! \brief Used for cheking convergence in mode finding algorithm (terminate if relative change in Laplace approx. is below this value) */
double DELTA_REL_CONV_ = 1e-6;
/*! \brief Additional parameters for likelihoods. For gamma, auxiliary_pars_[0] = shape parameter */
std::vector<double> aux_pars_;
// Map user-supplied likelihood-name aliases to their canonical names.
// Unrecognized names are returned unchanged.
string_t ParseLikelihoodAlias(const string_t& likelihood) {
const bool probit_alias = (likelihood == string_t("binary")
|| likelihood == string_t("bernoulli_probit")
|| likelihood == string_t("binary_probit"));
if (probit_alias) {
return "bernoulli_probit";
}
const bool gaussian_alias = (likelihood == string_t("gaussian")
|| likelihood == string_t("regression"));
if (gaussian_alias) {
return "gaussian";
}
return likelihood;
}
//Derived constants not defined in cmath
//1/sqrt(2*pi)
const double M_SQRT2PI_ = std::sqrt(2. * M_PI);
////1/sqrt(pi) (not used anymore, used for non-adaptive GH quadrature)
//const double M_1_SQRTPI_ = M_2_SQRTPI / 2.;
/*! \brief Order of the Gauss-Hermite quadrature */
int order_GH_ = 30;
/*! \brief Nodes and weights for the Gauss-Hermite quadrature */
// Source: https://keisan.casio.com/exec/system/1281195844
const std::vector<double> GH_nodes_ = { -6.863345293529891581061,
-6.138279220123934620395,
-5.533147151567495725118,
-4.988918968589943944486,
-4.48305535709251834189,
-4.003908603861228815228,
-3.544443873155349886925,
-3.099970529586441748689,
-2.667132124535617200571,
-2.243391467761504072473,
-1.826741143603688038836,
-1.415527800198188511941,
-1.008338271046723461805,
-0.6039210586255523077782,
-0.2011285765488714855458,
0.2011285765488714855458,
0.6039210586255523077782,
1.008338271046723461805,
1.415527800198188511941,
1.826741143603688038836,
2.243391467761504072473,
2.667132124535617200571,
3.099970529586441748689,
3.544443873155349886925,
4.003908603861228815228,
4.48305535709251834189,
4.988918968589943944486,
5.533147151567495725118,
6.138279220123934620395,
6.863345293529891581061 };
const std::vector<double> GH_weights_ = { 2.908254700131226229411E-21,
2.8103336027509037088E-17,
2.87860708054870606219E-14,
8.106186297463044204E-12,
9.1785804243785282085E-10,
5.10852245077594627739E-8,
1.57909488732471028835E-6,
2.9387252289229876415E-5,
3.48310124318685523421E-4,
0.00273792247306765846299,
0.0147038297048266835153,
0.0551441768702342511681,
0.1467358475408900997517,
0.2801309308392126674135,
0.386394889541813862556,
0.3863948895418138625556,
0.2801309308392126674135,
0.1467358475408900997517,
0.0551441768702342511681,
0.01470382970482668351528,
0.002737922473067658462989,
3.48310124318685523421E-4,
2.938725228922987641501E-5,
1.579094887324710288346E-6,
5.1085224507759462774E-8,
9.1785804243785282085E-10,
8.10618629746304420399E-12,
2.87860708054870606219E-14,
2.81033360275090370876E-17,
2.9082547001312262294E-21 };
const std::vector<double> adaptive_GH_weights_ = { 0.83424747101276179534,
0.64909798155426670071,
0.56940269194964050397,
0.52252568933135454964,
0.491057995832882696506,
0.46837481256472881677,
0.45132103599118862129,
0.438177022652683703695,
0.4279180629327437485828,
0.4198950037368240886418,
0.413679363611138937184,
0.4089815750035316024972,
0.4056051233256844363121,
0.403419816924804022553,
0.402346066701902927115,
0.4023460667019029271154,
0.4034198169248040225528,
0.4056051233256844363121,
0.4089815750035316024972,
0.413679363611138937184,
0.4198950037368240886418,
0.427918062932743748583,
0.4381770226526837037,
0.45132103599118862129,
0.46837481256472881677,
0.4910579958328826965056,
0.52252568933135454964,
0.56940269194964050397,
0.64909798155426670071,
0.83424747101276179534 };
/*! \brief Get number of non-zero entries in matrix */
template <class T_mat1, typename std::enable_if< std::is_same<sp_mat_t, T_mat1>::value>::type * = nullptr >
int GetNumberNonZeros(T_mat1 M) {
return((int)M.nonZeros());
};
template <class T_mat1, typename std::enable_if< std::is_same<den_mat_t, T_mat1>::value>::type * = nullptr >
int GetNumberNonZeros(T_mat1 M) {
return((int)M.cols() * M.rows());
};
};//end class Likelihood
} // namespace GPBoost
#endif // GPB_LIKELIHOODS_
|
main.c | /* cc -lm t4.c -qsmp */
#include "data.h"
#include "io.h"
/* utility routines */
FLT system_clock(FLT *x);
FLT **matrix(int nrl,int nrh,int ncl,int nch);
/* work routines */
void mset(FLT **m, int n, int in);
FLT mcheck(FLT **m, int n, int in);
void over(FLT ** mat,int size);
/*
 * Driver: build four n x n matrices, run the inversion routine over() twice
 * on each one in parallel (one OpenMP section per matrix), verify each result
 * with mcheck(), and report the per-section start/end times and errors via io().
 *
 * Usage: prog [d1 d2 d3 d4 n] -- up to four diagonal values and the matrix
 * size n; defaults are 10 20 30 40 50.
 *
 * Returns 0 when the summed check error is below 1, otherwise 1.
 */
int main(int argc,char *argv[]) {
FLT **m1,**m2,**m3,**m4;
FLT t0_start;
FLT t1_start,t1_end,e1;
FLT t2_start,t2_end,e2;
FLT t3_start,t3_end,e3;
FLT t4_start,t4_end,e4;
int n,narg,iarg;
int diag[5];
diag[0]=10;
diag[1]=20;
diag[2]=30;
diag[3]=40;
diag[4]=50;
/* Override the defaults from the command line.
 * Bug fix: the original set iarg = min(argc, 5) and looped narg <= iarg,
 * which read argv[argc] (the NULL terminator) and passed it to atoi()
 * whenever fewer than 5 arguments were supplied -- undefined behavior.
 * Only argv[1] .. argv[argc-1] are valid. */
iarg=argc-1;
if(iarg > 5)iarg=5;
for (narg=1;narg<=iarg;narg++) {
diag[narg-1]=atoi(argv[narg]);
}
for(narg=0;narg<5;narg++)
printf("%d ",diag[narg]);
printf("\n");
/* diag[4] is the matrix size; diag[0..3] are the diagonal values */
n=diag[4];
m1=matrix(1,n,1,n);
m2=matrix(1,n,1,n);
m3=matrix(1,n,1,n);
m4=matrix(1,n,1,n);
mset(m1,n,diag[0]);
mset(m2,n,diag[1]);
mset(m3,n,diag[2]);
mset(m4,n,diag[3]);
system_clock(&t0_start);
/* Each section inverts its matrix twice (an involution: the result should
 * equal the original) and records its own wall-clock window relative to
 * t0_start. The four sections run concurrently. */
#pragma omp parallel sections
{
#pragma omp section
{
system_clock(&t1_start);
over(m1,n);
over(m1,n);
system_clock(&t1_end);
e1=mcheck(m1,n,diag[0]);
t1_start=t1_start-t0_start;
t1_end=t1_end-t0_start;
}
#pragma omp section
{
system_clock(&t2_start);
over(m2,n);
over(m2,n);
system_clock(&t2_end);
e2=mcheck(m2,n,diag[1]);
t2_start=t2_start-t0_start;
t2_end=t2_end-t0_start;
}
#pragma omp section
{
system_clock(&t3_start);
over(m3,n);
over(m3,n);
system_clock(&t3_end);
e3=mcheck(m3,n,diag[2]);
t3_start=t3_start-t0_start;
t3_end=t3_end-t0_start;
}
#pragma omp section
{
system_clock(&t4_start);
over(m4,n);
over(m4,n);
system_clock(&t4_end);
e4=mcheck(m4,n,diag[3]);
t4_start=t4_start-t0_start;
t4_end=t4_end-t0_start;
}
}
io( t1_start, t1_end, e1,
t2_start, t2_end, e2,
t3_start, t3_end, e3,
t4_start, t4_end, e4) ;
/* Exit status reflects the total verification error */
e1=e1+e2+e3+e4;
if (e1 < 1) {
return 0;
}
else {
return 1;
}
}
|
naive_bayes_cilk.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <memory.h>
#include <sys/time.h>
#include <cilk/cilk.h>
#define REAL double
#define EPSILON 0.000001
#define MICRO_IN_SEC 1000000.00
/*__declspec(target(mic)) double begin, end;
__declspec(target(mic))
*/
double begin,end;
double microtime(){
int tv_sec,tv_usec;
double time;
struct timeval tv;
struct timezone tz;
gettimeofday(&tv,&tz);
return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC;
}
int caculateNB_train(char *ifn) ;
int caculateNB_classify(char *ifn);
int main(int argc,char* argv[]) {
char *ifn=NULL,*argument=NULL;
if(argc<3)
{
printf("Wrong command format! usage:COMMAND ARGUMENT INPUTFILENAME\nARGUMENT:\n\ttrain:\ttrain the classifier\n\tclassify:\tclassify the dataset\n");
return 0;
}
else
{
ifn=argv[2];
argument=argv[1];
if(!strcmp(argument,"train")) {
caculateNB_train(ifn);
} else if(!strcmp(argument,"classify")) {
caculateNB_classify(ifn);
} else {
printf("Error command!\n");
}
return 0;
}
}
/* Train a naive Bayes classifier from the dataset in 'ifn' and write the
 * model (P(C) and P(A|C), with add-one Laplace smoothing) to
 * "nb_train_result.txt" in the format read back by caculateNB_classify().
 * Input file layout: "linen propertyn classn", then propertyn value counts,
 * then linen records of "class p1 p2 ... p_propertyn".
 * Returns 0; exits the process on any open/alloc failure. */
int caculateNB_train(char *ifn) {
char *ofn="nb_train_result.txt";
FILE *ifp,*ofp;
int i,j,k,class_cluster,a,linen,propertyn,classn,*array_valuen,*array_class,*array_property_class,*array_counts,*array_index,array_length;
REAL *array_class_probability,*array_probability;
begin = microtime();
if((ifp=fopen(ifn,"r"))==NULL)
{
printf("%s file open error!\n",ifn);
exit(0);
}
else
{
printf("%s file opened success!\n",ifn);
}
if((ofp=fopen(ofn,"w"))==NULL)
{
printf("%s file open error!\n",ofn);
fclose(ifp);
exit(0);
}
else
{
printf("%s file opened success!\n",ofn);
}
printf("Get base info\n");
fscanf(ifp,"%d%d%d",&linen,&propertyn,&classn); //linen is the number of dataset lines, \
propertyn is the number of property of every dataset, classn is the number of classes;
printf("Read data\n");
//array_valuen is an array of the max value of every property
if((array_valuen=(int *)malloc(propertyn*sizeof(int)))==NULL) {
printf("Memory alloc ERROR!\n");
fclose(ifp);
fclose(ofp);
exit(0);
}
printf("Get property number\n");
for(int i=0;i<propertyn;i++) {
fscanf(ifp,"%d",array_valuen+i);
}
//array_index is an array of the index of every property in array_probability
if((array_index=(int *)malloc(propertyn*sizeof(int)))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
exit(0);
}
for(int i=0;i<propertyn;i++) {
array_index[i] = 0;
}
// Prefix sums: property i's (value, class) counters start at array_index[i];
// array_length is the total number of (property value, class) cells.
array_length = array_valuen[0] * classn;
array_index[0] = 0;
if(propertyn>1) {
for(int i=1;i<propertyn;i++) {
array_length += array_valuen[i] * classn;
array_index[i] = array_valuen[i-1] * classn + array_index[i-1];
}
}
//the array_class is the array of count of class
if((array_class=(int *)malloc(classn*(sizeof(int))))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_index);
exit(0);
}
memset(array_class,0,classn*sizeof(int));
//the array_property_class is the array of the count of every property of every class
if((array_property_class=(int *)malloc(propertyn*classn*(sizeof(int))))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_class);
free(array_index);
exit(0);
}
memset(array_property_class,0,propertyn*classn*sizeof(int));
//array_counts is an array of the pointer of counter of every property of every class
if((array_counts=(int *)malloc(array_length*(sizeof(int))))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_index);
free(array_class);
free(array_property_class);
exit(0);
}
memset(array_counts,0,array_length*(sizeof(int)));
printf("Get every needed info\n");
// Single pass over the training records: accumulate class counts and
// per-(property, value, class) co-occurrence counts.
for(int i=0;i<linen;i++) {
fscanf(ifp,"%d",&class_cluster);
array_class[class_cluster]++;
for(int j=0;j<propertyn;j++) {
fscanf(ifp,"%d",&a);
array_counts[ array_index[j] + a*classn + class_cluster ] ++;
array_property_class[j*classn+class_cluster]++;
}
}
//array_class_probability is an array of the classes
if((array_class_probability=(REAL *)malloc(classn*(sizeof(REAL))))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_index);
free(array_counts);
free(array_class);
free(array_property_class);
exit(0);
}
//array_probability is an array of the pointer of probability of every property of every class
if((array_probability=(REAL *)malloc(array_length*(sizeof(REAL))))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_index);
free(array_counts);
free(array_class);
free(array_class_probability);
free(array_property_class);
exit(0);
}
end = microtime();
printf("\nalloc memory and reading data consuming time: %fs\n\n",end-begin);
begin = end;
printf("Get P(C)\n");
//caculate the p(c) with add-one (Laplace) smoothing: (count + 1) / (linen + classn)
/*#pragma offload target(mic) \
inout(array_class_probability:length(classn)) \
in(array_class:length(classn))
#pragma omp parallel for
*/
cilk_for(int i=0;i<classn;i++) {
array_class_probability[i]=(REAL)(array_class[i]+1)/(REAL)(linen+classn);
}
printf("Get P(A|C)\n");
//caculate the p(a|c) with add-one (Laplace) smoothing over the property's value range
/*
#pragma offload target(mic) \
inout(array_probability:length(array_length)) \
in(array_counts:length(array_length)) \
in(array_index:length(propertyn)) \
in(array_property_class:length(propertyn*classn)) \
in(array_valuen:length(propertyn))
#pragma omp parallel for
*/
for(int i=0;i<propertyn;i++) {
for(int j=0;j<array_valuen[i];j++) {
for(int k=0;k<classn;k++) {
array_probability[ array_index[i] + j*classn+k ]=(REAL)( array_counts[ array_index[i] + j*classn+k ] + 1 )/(REAL)(array_property_class[i*classn+k]+array_valuen[i]);
}
}
}
end = microtime();
printf("\ntrain the classifier consuming time: %fs\n\n",end - begin);
begin = end;
//p(c) and p(a|c) is the training result
printf("Outputing the training result to %s\n",ofn);
fprintf(ofp,"%d %d\n",propertyn,classn);
for(int i=0;i<propertyn;i++) {
fprintf(ofp,"%d ",array_valuen[i]);
}
fprintf(ofp,"\n");
for(int i=0;i<classn;i++) {
fprintf(ofp,"%f ",array_class_probability[i]);
}
fprintf(ofp,"\n");
for(int i=0;i<propertyn;i++) {
for(int j=0;j<array_valuen[i];j++) {
for(int k=0;k<classn;k++) {
fprintf(ofp,"%f ",array_probability[ array_index[i] + j*classn+k]);
}
fprintf(ofp,"\n");
}
}
printf("Recycle all resources\n");
//recycle all resources
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_index);
free(array_property_class);
free(array_probability);
free(array_counts);
free(array_class);
free(array_class_probability);
printf("\nPlease DON'T change %s either its name and content!!\n",ofn);
return 0;
}
/* Classify the dataset in 'ifn' with the naive Bayes model previously written
 * by caculateNB_train() to "nb_train_result.txt". Each record's class is the
 * argmax over classes of log P(C) + sum_j log P(a_j|C); results ("index class"
 * per line) go to "nb_classify_result.txt". Returns 0; exits on open/alloc
 * failure.
 *
 * Fixes relative to the original version:
 *  - REAL is double, so the model file must be read with "%lf"; the original
 *    used "%f" (writes a float into a double -- undefined behavior, garbage
 *    probabilities).
 *  - 'max' was a function-scope variable written inside the parallel cilk_for,
 *    a data race corrupting the classification; it is now local to each
 *    iteration.
 *  - ifp_classifier was never fclose'd and array_probability_index was never
 *    freed (resource/memory leaks, also on the error paths). */
int caculateNB_classify(char *ifn) {
char *ofn="nb_classify_result.txt",*ifn_classifier="nb_train_result.txt";
FILE *ifp,*ofp,*ifp_classifier;
int linen,propertyn,classn,*array_valuen,*array_test_class,*array_test,*array_probability_index,array_probability_length;
REAL *array_class_probability,*array_probability,*array_test_class_probability;
begin = microtime();
if((ifp_classifier=fopen(ifn_classifier,"r"))==NULL)
{
printf("%s file open error!\n",ifn_classifier);
exit(0);
}
else
{
printf("%s file opened success!\n",ifn_classifier);
}
if((ifp=fopen(ifn,"r"))==NULL)
{
printf("%s file open error!\n",ifn);
fclose(ifp_classifier);
exit(0);
}
else
{
printf("%s file opened success!\n",ifn);
}
if((ofp=fopen(ofn,"w"))==NULL)
{
printf("%s file open error!\n",ofn);
fclose(ifp);
fclose(ifp_classifier);
exit(0);
}
else
{
printf("%s file opened success!\n",ofn);
}
printf("Get base info from %s and configure the classifier\n",ifn_classifier);
fscanf(ifp_classifier,"%d%d",&propertyn,&classn); //propertyn is the number of property of every dataset, classn is the number of classes;
//array_valuen is an array of the max value of every property
if((array_valuen=(int *)malloc(propertyn*sizeof(int)))==NULL) {
printf("Memory alloc ERROR!\n");
fclose(ifp);
fclose(ofp);
fclose(ifp_classifier);
exit(0);
}
for(int i=0;i<propertyn;i++) {
fscanf(ifp_classifier,"%d",array_valuen+i);
}
//array_class_probability is an array of the class prior probabilities P(C)
if((array_class_probability=(REAL *)malloc(classn*(sizeof(REAL))))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
fclose(ifp_classifier);
free(array_valuen);
exit(0);
}
for(int i=0;i<classn;i++) {
fscanf(ifp_classifier,"%lf",array_class_probability+i); /* REAL is double -> %lf */
}
//array_probability_index is an array of the index of every property in array_probability
if((array_probability_index=(int *)malloc(propertyn*sizeof(int)))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
fclose(ifp_classifier);
free(array_valuen);
free(array_class_probability);
exit(0);
}
for(int i=0;i<propertyn;i++) {
array_probability_index[i] = 0;
}
// Prefix sums: property i's (value, class) probabilities start at array_probability_index[i]
array_probability_length = array_valuen[0] * classn;
array_probability_index[0] = 0;
if(propertyn>1) {
for(int i=1;i<propertyn;i++) {
array_probability_length += array_valuen[i] * classn;
array_probability_index[i] = array_valuen[i-1] * classn + array_probability_index[i-1];
}
}
//array_probability is an array of probability of every property of every class
if((array_probability=(REAL *)malloc(array_probability_length*(sizeof(REAL))))==NULL) {
printf("memory alloc error!\n");
fclose(ifp);
fclose(ofp);
fclose(ifp_classifier);
free(array_valuen);
free(array_probability_index);
free(array_class_probability);
exit(0);
}
for(int i=0;i<propertyn;i++) {
for(int j=0;j<array_valuen[i];j++) {
for(int k=0;k<classn;k++) {
fscanf(ifp_classifier,"%lf",&array_probability[ array_probability_index[i] + j*classn + k ]); /* REAL is double -> %lf */
}
}
}
fclose(ifp_classifier); /* model fully loaded; close it (was leaked before) */
printf("Classifier initialize done!\n");
printf("Begin classify the dataset\n");
fscanf(ifp,"%d",&linen);
//array_test is an array of the input data
if((array_test=(int *)malloc(linen*propertyn*sizeof(int)))==NULL) {
printf("Memory alloc ERROR!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_probability_index);
free(array_probability);
free(array_class_probability);
exit(0);
}
for(int i=0;i<linen*propertyn;i++) {
fscanf(ifp,"%d",array_test+i);
}
//array_test_class is an array of the classify result of every test data record
if((array_test_class=(int *)malloc(linen*sizeof(int)))==NULL) {
printf("Memory alloc ERROR!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_probability_index);
free(array_probability);
free(array_class_probability);
free(array_test);
exit(0);
}
//array_test_class_probability is an array of the probability of every test data record of every class
if((array_test_class_probability=(REAL *)malloc(linen*classn*sizeof(REAL)))==NULL) {
printf("Memory alloc ERROR!\n");
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_probability_index);
free(array_probability);
free(array_class_probability);
free(array_test_class);
free(array_test);
exit(0);
}
end = microtime();
printf("\nalloc memory and reading data consuming time: %fs\n\n",end-begin);
begin = end;
// Classify all records in parallel. Log-probabilities are summed to avoid
// underflow. Note: 'max' is per-iteration local (was a shared variable, i.e.
// a data race, in the original).
cilk_for(int i=0;i<linen;i++) {
int max=0;
for(int j=0;j<classn;j++) {
array_test_class_probability[i*classn+j] = log( array_class_probability[j] );
}
for(int j=0;j<propertyn;j++) {
for(int k=0;k<classn;k++) {
array_test_class_probability[i*classn+k] += log( array_probability[ array_probability_index[j] + array_test[i*propertyn+j]*classn + k] );
}
}
// argmax over classes (a new class must beat the current best by EPSILON)
for(int j=0;j<classn;j++) {
if(array_test_class_probability[i*classn+j]-array_test_class_probability[i*classn+max]>EPSILON) {
max=j;
}
}
array_test_class[i]=max;
}
end = microtime();
printf("\nclassify the data consuming time: %fs\n\n",end - begin);
begin = end;
printf("Classify done\n");
for(int i=0;i<linen;i++) {
fprintf(ofp,"%d %d\n",i,array_test_class[i]);
}
printf("Result outputed to %s\n",ofn);
fclose(ifp);
fclose(ofp);
free(array_valuen);
free(array_probability_index);
free(array_probability);
free(array_class_probability);
free(array_test_class);
free(array_test_class_probability);
free(array_test);
return 0;
}
|
DRB001-antidep1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@64:10 vs. a[i]@64:5
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * DataRaceBench kernel DRB001 (antidep1, "orig-yes"): the data race documented
 * in the file header (read of a[i+1] vs. write of a[i]) is INTENTIONAL -- this
 * is a positive ("yes") race test case and must not be "fixed".
 *
 * Fix: in this copy the '#pragma omp parallel for' had drifted onto the
 * race-free initialization loop, so the file no longer contained the race its
 * own header documents. The pragma is restored to the anti-dependence loop,
 * matching upstream DataRaceBench DRB001-antidep1-orig-yes.c.
 */
int main(int argc, char* argv[])
{
  int i;
  int len = 1000;
  int a[1000];
  /* Sequential initialization: no race here */
  for (i=0; i<len; i++)
    a[i]= i;
  /* Parallel loop with a loop-carried anti-dependence:
   * data race pair a[i+1] (read) vs. a[i] (write) */
#pragma omp parallel for
  for (i=0;i< len -1 ;i++)
    a[i]=a[i+1]+1;
  printf ("a[500]=%d\n", a[500] );
  return 0;
}
|
GB_bitmap_assign_C_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_C_template: iterate over a bitmap matrix C
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// The #include'ing file defines a GB_CIJ_WORK macro for the body of the loop,
// which operates on the entry C(iC,jC) at position Cx [pC] and Cb [pC]. The C
// matrix held in bitmap form. If the mask matrix is also a bitmap matrix or
// full matrix, the GB_GET_MIJ macro can compute the effective value of the
// mask for the C(iC,jC) entry.
// C must be bitmap or full. If M is accessed, it must also be bitmap or full.
#ifndef GB_GET_MIJ
#define GB_GET_MIJ(mij,pM) ;
#endif
{
    // Dispatch on the kind of assignment being performed on the bitmap
    // matrix C.  In every case the work is split into `nthreads` tasks;
    // each task accumulates its entry-count delta into a private
    // task_cnvals, which the OpenMP reduction(+) folds into cnvals.
    // GB_CIJ_WORK and GB_GET_MIJ are supplied by the #include'ing file.
    switch (assign_kind)
    {

        //----------------------------------------------------------------------
        // row assignment: C<M'>(iC,:), M is a column vector
        //----------------------------------------------------------------------

        case GB_ROW_ASSIGN :
        {
            // iterate over all of C(iC,:)
            const int64_t iC = I [0] ;
            const int nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ;
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // this task handles the contiguous column range
                // jC_start .. jC_end-1
                int64_t jC_start, jC_end, task_cnvals = 0 ;
                GB_PARTITION (jC_start, jC_end, cvdim, tid, nthreads) ;
                for (int64_t jC = jC_start ; jC < jC_end ; jC++)
                {
                    // pC = position of C(iC,jC) in the bitmap (column-major)
                    int64_t pC = iC + jC * cvlen ;
                    GB_GET_MIJ (mij, jC) ;          // mij = Mask (jC)
                    GB_CIJ_WORK (pC) ;              // operate on C(iC,jC)
                }
                cnvals += task_cnvals ;
            }
        }
        break ;

        //----------------------------------------------------------------------
        // column assignment: C<M>(:,jC), M is a column vector
        //----------------------------------------------------------------------

        case GB_COL_ASSIGN :
        {
            // iterate over all of C(:,jC)
            const int64_t jC = J [0] ;
            // pC0 = offset of the first entry of column jC in the bitmap
            const int64_t pC0 = jC * cvlen ;
            const int nthreads = GB_nthreads (cvlen, chunk, nthreads_max) ;
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // this task handles the contiguous row range
                // iC_start .. iC_end-1
                int64_t iC_start, iC_end, task_cnvals = 0 ;
                GB_PARTITION (iC_start, iC_end, cvlen, tid, nthreads) ;
                for (int64_t iC = iC_start ; iC < iC_end ; iC++)
                {
                    int64_t pC = iC + pC0 ;
                    GB_GET_MIJ (mij, iC) ;          // mij = Mask (iC)
                    GB_CIJ_WORK (pC) ;              // operate on C(iC,jC)
                }
                cnvals += task_cnvals ;
            }
        }
        break ;

        //----------------------------------------------------------------------
        // GrB_assign: C<M>(I,J), M is a matrix the same size as C
        //----------------------------------------------------------------------

        #ifndef GB_NO_ASSIGN_CASE
        case GB_ASSIGN :
        {
            // iterate over all of C(:,:).
            #include "GB_bitmap_assign_C_whole_template.c"
        }
        break ;
        #endif

        //----------------------------------------------------------------------
        // GxB_subassign: C(I,J)<M>, M is a matrix the same size as C(I,J)
        //----------------------------------------------------------------------

        #ifndef GB_NO_SUBASSIGN_CASE
        case GB_SUBASSIGN :
        {
            // iterate over all of C(I,J); the mask is indexed by the
            // position pA within the I-by-J subregion, not by pC.
            #undef  GB_IXJ_WORK
            #define GB_IXJ_WORK(pC,pA)                              \
            {                                                       \
                GB_GET_MIJ (mij, pA) ;      /* mij = Mask (pA) */   \
                GB_CIJ_WORK (pC) ;          /* operate on C(iC,jC) */ \
            }
            #include "GB_bitmap_assign_IxJ_template.c"
        }
        break ;
        #endif

        default: ;
    }
}
#undef GB_NO_ASSIGN_CASE
#undef GB_NO_SUBASSIGN_CASE
|
interact.c | /******************************************************************************
* *
* INTERACT.C *
* *
* PROCESS ABSORPTION AND SCATTERING *
* *
******************************************************************************/
#include "decs.h"
#if RADIATION
#if SCATTERING
#ifndef REBALANCE
#define REBALANCE (1)
#endif // rebalance
#endif // scattering
#if RADIATION == RAD_TYPE_LIGHT
#define CONSERVATIVE_BOUND (1)
#else
#define CONSERVATIVE_BOUND (0)
#endif
#if SCATTERING
// Conservative estimate of the scattering optical depth along a path of
// length Rout_rad (converted to cgs with L_unit).  Returns 1.0 when the
// extinction coefficient from alpha_inv_scatt is negligible or negative.
double conservative_dtau_est(
    double nu, int type, int interaction, const struct of_microphysics *m) {
  const double path_length = Rout_rad * L_unit;
  const double ext_coeff   = alpha_inv_scatt(nu, type, interaction, m);

  if (fabs(ext_coeff) <= SMALL * SMALL || ext_coeff < 0) {
    return 1.0;
  }
  return (ext_coeff / nu) * path_length;
}
// Cap the scattering bias so that bias * dtau, summed over all scattering
// channels, cannot exceed unity, then enforce the floor bias >= 1.
// Assumes large hotspots (~GM/c^2 length scale) via conservative_dtau_est.
double bound_bias(double bias, double nu, int type, int interaction,
    const struct of_microphysics *m, double uph) {
  const double dtau_est = conservative_dtau_est(nu, type, interaction, m);
  const double cap      = 1. / (dtau_est * RAD_SCATT_TYPES);
  double bounded        = MY_MIN(bias, cap);
  return MY_MAX(bounded, 1.);
}
// Scattering bias for one channel: the global tuning factor tune_scatt
// times a polynomial amplification factor in the scatterer's dimensionless
// temperature, optionally bounded by bound_bias when CONSERVATIVE_BOUND.
double get_scatt_bias(double nu, int type, int interaction,
    const struct of_microphysics *m, double uph) {
  const double Thetae = scatterer_dimensionless_temp(type, interaction, m);
  const double amplification =
      1. + 4. * Thetae - 2. * pow(Thetae, 3. / 2.) + 16. * pow(Thetae, 2.);
  double bias = tune_scatt * amplification;
#if CONSERVATIVE_BOUND
  bias = bound_bias(bias, nu, type, interaction, m, uph);
#endif
  return bias;
}
// Rebalance per-channel biases so that every channel's bias * dtau product
// matches the largest such product, spending the bias budget on the
// undersampled interactions.  No-op with a single scattering channel.
void rebalance_biases(
    double dtau_scatt[RAD_SCATT_TYPES], double bias_scatt[RAD_SCATT_TYPES]) {
  if (RAD_SCATT_TYPES == 1)
    return;

  // Locate the largest bias * dtau product over all channels.
  double max_prod = -INFINITY;
  SCATTLOOP {
    double channel_prod = bias_scatt[iscatt] * dtau_scatt[iscatt];
    max_prod = (channel_prod > max_prod) ? channel_prod : max_prod;
  }

  // Reset each bias so its bias * dtau equals max_prod.  dtau can
  // underflow; such a channel is very subdominant and gets bias = 1.
  SCATTLOOP {
    bias_scatt[iscatt] = (dtau_scatt[iscatt] < SMALL)
                             ? 1
                             : max_prod / (dtau_scatt[iscatt] + SMALL);
  }
}
/*
* Ensure biases obey relation
*
* 0 < sum_{interactions i} ( b_i dtau_i ) = SCATT_BIAS_SAFETY < 1
*
* and that each bias >= 1.
*/
void bound_all_biases(double dtau_scatt[RAD_SCATT_TYPES],
double bias_scatt[RAD_SCATT_TYPES], double nu, double type,
const struct of_microphysics *m) {
double sum = 0;
SCATTLOOP {
// ensure dtau and bias are physical
if (bias_scatt[iscatt] < 1.)
bias_scatt[iscatt] = 1.;
double dtau =
dtau_scatt[iscatt]; // conservative_dtau_est(nu,type,iscatt,m);
double prod = dtau * bias_scatt[iscatt];
sum += prod;
}
double ratio = SCATT_BIAS_SAFETY / sum;
// printf("Sum = %g\nRatio = %g\n",sum,ratio);
if (ratio < 1) { // Shrink all biases so sum is safe
SCATTLOOP bias_scatt[iscatt] *= ratio;
}
// Make sure all biases >= 1.
SCATTLOOP {
if (isnan(bias_scatt[iscatt]) || bias_scatt[iscatt] < 1 ||
isinf(bias_scatt[iscatt])) {
bias_scatt[iscatt] = 1.0;
}
}
}
#endif // SCATTERING
// Hard cap on interactions per superphoton per timestep, to bound the
// inner while loop when multiple scatterings occur in one step.
#define MAX_INTERACTIONS 100

// Process absorption and scattering for every superphoton in the
// per-thread photon lists, over the interval [t, t + dt].
//
// P      : primitive fluid variables on the grid
// extra  : extra EOS variables
// t, dt  : start time and length of the step
//
// Side effects: mutates the per-thread photon lists (removes absorbed /
// bad photons, inserts scattered ones), and accumulates into the shared
// diagnostics radG, Jrad, Nabs, Nabs_phys, Nsc, dtau_avg, en_int_avg and
// the step_* counters.  All shared accumulations inside the parallel
// region use `#pragma omp atomic`.
void interact(grid_prim_type P, grid_eosvar_type extra, double t, double dt) {
  timer_start(TIMER_INTERACT);

#if KILL_ALL_PACKETS
  {
#pragma omp parallel
    {
      struct of_photon *ph = photon_lists[omp_get_thread_num()];
      while (ph != NULL) {
        if (ph->type != TYPE_TRACER)
          ph->w = 0;
        ph = ph->next;
      }
    }
    // BUG FIX: this early return previously skipped timer_stop, leaving
    // the TIMER_INTERACT timer running.
    timer_stop(TIMER_INTERACT);
    return;
  }
#endif

#if ABSORPTION || SCATTERING
  const double d3x = dx[1] * dx[2] * dx[3];

#pragma omp parallel
  {
    int               i, j, k;
    struct of_photon *ph   = photon_lists[omp_get_thread_num()];
    struct of_photon *prev = NULL;
    struct of_photon *head = ph;
    double            X[NDIM], Kcon[NDIM], Kcov[NDIM];
    double            dlam, nu;
    double            dtau_abs = 0.;
    double            dtau_scatt[RAD_SCATT_TYPES];
    double            bias_scatt[RAD_SCATT_TYPES];
    double            xabs;
    double            xscatt[RAD_SCATT_TYPES];
    double            dlfrac;
    int               abs_possible, scatt_possible, scatt_to_do;
    int               nint;

    SCATTLOOP {
      dtau_scatt[iscatt] = 0.;
      bias_scatt[iscatt] = 0.;
    }

    while (ph != NULL) {
      // Skip dead packets and tracers.
      if (ph->w < SMALL || ph->type == TYPE_TRACER) {
        prev = ph;
        ph   = ph->next;
        continue;
      }

      double tmin = MY_MAX(t, ph->t0);

      // Loop with decreasing d \lambda to account for multiple interactions
      // per superphoton per timestep (scattering only).
      nint = 0;
      while (nint < MAX_INTERACTIONS) {
        nint++;
        double dtint  = t + dt - tmin;
        int    status = get_X_K_interp(ph, t + dt / 2., P, X, Kcov, Kcon);
        if (status == SPH_INTERP_FAIL) {
          prev = ph;
          ph   = ph->next;
          break;
        }
        dlam = dtint / Kcon[0];

        Xtoijk(X, &i, &j, &k);
        if (i < 0 || i > N1 + 2 * NG - 1 || j < 0 || j > N2 + 2 * NG - 1 ||
            k < 0 || k > N3 + 2 * NG - 1) {
          fprintf(stderr, "PHOTON IN BAD PLACE! %i %i %i\n", i, j, k);
          print_ph_diag(ph);
#if RADIATION == RADTYPE_NEUTRINOS
          record_lepton_flux(ph);
#endif
          list_remove(&ph, &head, &prev);
#pragma omp atomic
          step_tot--;
          break;
        }

        // Fluid quantities are read from the cached per-zone grids
        // (m_grd, Ucon_grd, ...) rather than recomputed here.

// skip if in atmosphere
#if EOS == EOS_TYPE_TABLE && METRIC == MKS
        if (P[i][j][k][ATM] < ATM_THRESH) {
          prev = ph;
          ph   = ph->next;
          break;
        }
#endif

        if (scatt_temp_too_small(&(m_grd[i][j][k]))) {
          prev = ph;
          ph   = ph->next;
          break;
        }

        nu = get_fluid_nu(X, Kcov, Ucon_grd[i][j][k]);
        if (nu <= 0 || is_practically_nan(nu)) {
          double gamma;
          mhd_gamma_calc(P[i][j][k], &(ggeom[i][j][CENT]), &gamma);
          fprintf(stderr,
              "Bad NU in interact [%i %i %i]\n"
              "\tNU = %e\n"
              "\tgamma = %e\n"
              "\tX[] = [%e %e %e %e]\n"
              "\tKcov[] = [%e %e %e %e]\n"
              "\tUcon[] = [%e %e %e %e]\n",
              i, j, k, nu, gamma, X[0], X[1], X[2], X[3], Kcov[0], Kcov[1],
              Kcov[2], Kcov[3], Ucon_grd[i][j][k][0], Ucon_grd[i][j][k][1],
              Ucon_grd[i][j][k][2], Ucon_grd[i][j][k][3]);
#if RADIATION == RADTYPE_NEUTRINOS
          record_lepton_flux(ph);
#endif
          list_remove(&ph, &head, &prev);
#pragma omp atomic
          step_tot--;
          break;
        }

// Calculate and sample optical depths along step
#if ABSORPTION
        double theta = get_bk_angle(
            X, Kcon, Ucov_grd[i][j][k], Bcov_grd[i][j][k], m_grd[i][j][k].B);
        dtau_abs = ((HPL * L_unit / (ME * CL * CL)) * dlam *
                    alpha_inv_abs(nu, ph->type, &(m_grd[i][j][k]), theta));
#endif

        /* Strategy:
         * 1. Calculate all biases based on global bias and bound them
         * 2. calculate dtau*bias for all interactions
         * 3. Set every bias so that dtau*bias is equal to largest dtau*bias
         *    for all interactions
         * 4. The total probability of interaction cannot be greater than 1.
         *    Therefore we demand
         *        sum_{interactions i} (b_i dtau_i) < 1
         *    We check this after rebalancing the biases and enforce it
         *    conservatively.
         *
         * NOTE: The idea is that weakly/undersampled interactions will be
         *       enhanced while maintaining stability.
         *       There's a finite "bias" budget we can spend and we want to
         *       spend it enhancing the interactions that need it.
         * ~JMM
         */
#if SCATTERING
        {
          double uph =
              HPL * nu * ph->w / (ggeom[i][j][CENT].g * d3x * pow(L_unit, 3));
          SCATTLOOP {
            dtau_scatt[iscatt] =
                ((HPL * L_unit / (ME * CL * CL)) * dlam *
                    alpha_inv_scatt(nu, ph->type, iscatt, &(m_grd[i][j][k])));
            bias_scatt[iscatt] =
                get_scatt_bias(nu, ph->type, iscatt, &(m_grd[i][j][k]), uph);
          }
#if REBALANCE
          {
            rebalance_biases(dtau_scatt, bias_scatt);
            bound_all_biases(
                dtau_scatt, bias_scatt, nu, ph->type, &(m_grd[i][j][k]));
#if CONSERVATIVE_BOUND
            SCATTLOOP {
              bias_scatt[iscatt] = bound_bias(bias_scatt[iscatt], nu, ph->type,
                  iscatt, &(m_grd[i][j][k]), uph);
            }
#endif
          }
#else // NO REBALANCE
          {
            SCATTLOOP {
              bias_scatt[iscatt] = bound_bias(bias_scatt[iscatt], nu, ph->type,
                  iscatt, &(m_grd[i][j][k]), uph);
            }
          }
#endif // REBALANCE
        }
#endif // SCATTERING

        // Sample optical depths: exponential deviates, divided by the
        // bias for the (biased) scattering channels.
        xabs = -log(get_rand());
        SCATTLOOP { xscatt[iscatt] = -log(get_rand()) / bias_scatt[iscatt]; }

        // are we absorbing and/or scattering?
        abs_possible   = (xabs <= dtau_abs) && ABSORPTION;
        scatt_possible = SCATTERING;
        if (scatt_possible) {
          SCATTLOOP {
            scatt_possible =
                (scatt_possible && (xscatt[iscatt] <= dtau_scatt[iscatt]));
          }
        }

        // No interaction
        if (!(abs_possible || scatt_possible)) {
          prev = ph;
          ph   = ph->next;
          break;
        }

// Absorption.  NOTE: when ABSORPTION is 0, the `else` below attaches to
// the "No interaction" if above instead of `if (do_abs)` — deliberate,
// do not reformat this #if/#endif boundary.
#if ABSORPTION
        int do_abs = abs_possible;
        if (abs_possible) {
          // Absorb only if absorption happens before every scattering.
          double absfrac = xabs / (dtau_abs + SMALL);
          SCATTLOOP {
            double scattfrac = xscatt[iscatt] / (dtau_scatt[iscatt] + SMALL);
            do_abs           = do_abs && (absfrac < scattfrac);
          }
        }
        if (do_abs) {
          dlfrac        = xabs / dtau_abs;
          double tabs   = tmin + dlfrac * dtint;
          int    status = get_X_K_interp(ph, tabs, P, X, Kcov, Kcon);
          if (status == SPH_INTERP_FAIL) {
            prev = ph;
            ph   = ph->next;
            break;
          }
          Xtoijk(X, &i, &j, &k);
          // Boundary transport cannot use MPI with one zone
          if (N1 == 1)
            i = NG;
          if (N2 == 1)
            j = NG;
          if (N3 == 1)
            k = NG;
          // Deposit the four-force of the absorbed packet.
          for (int mu = 0; mu < NDIM; mu++) {
#pragma omp atomic
            radG[i][j][k][mu] +=
                1. / (dt * d3x) * ph->w * kphys_to_num * Kcov[mu];
          }
#if RADIATION == RADTYPE_NEUTRINOS
          {
#pragma omp atomic
            radG[i][j][k][RADG_YE] +=
                ((1. / (dt * d3x)) * Ucon_grd[i][j][k][0] * ph->w *
                    (MP / M_unit) * get_lepton_sign(ph));
          }
#endif
          // BUG FIX: this shared accumulation previously lacked the atomic
          // that every other Jrad/dtau_avg update in this region has — a
          // data race between threads absorbing in the same zone.
#pragma omp atomic
          Jrad[1][i][j][k] += (dt / DTd) * ph->Kcov[2][0] * kphys_to_num *
                              ph->w / (ggeom[i][j][CENT].g * dt * d3x);
#pragma omp atomic
          step_abs++;
#pragma omp atomic
          Nabs[i][j][k]++;
#pragma omp atomic
          Nabs_phys[i][j][k][ph->type] += ph->w;
          if (dtau_abs < 100) {
#pragma omp atomic
            dtau_avg[0][i][j][k] +=
                dtau_abs * (-ph->Kcov[2][0] * ME * CL * CL) * ph->w;
#pragma omp atomic
            en_int_avg[0][i][j][k] += (-ph->Kcov[2][0] * ME * CL * CL) * ph->w;
          }
          // Kill the packet; it is reaped on a later pass (w < SMALL).
          ph->w = 0.;
          tmin  = tabs;
          prev  = ph;
          ph    = ph->next;
          break;
        }
#endif
        else { // scattering
          // Of all possible scattering interactions,
          // we perform the one with the smallest fraction x/dtau
          scatt_to_do = 0;
          dlfrac      = xscatt[0] / (dtau_scatt[0] + SMALL);
          SCATTLOOP {
            double scattfrac = xscatt[iscatt] / (dtau_scatt[iscatt] + SMALL);
            if (scattfrac < dlfrac) {
              scatt_to_do = iscatt;
              dlfrac      = scattfrac;
            }
          }
          double tscatt = tmin + dlfrac * dtint;
          int    status;
          status = get_X_K_interp(ph, tscatt, P, X, Kcov, Kcon);
          if (status == SPH_INTERP_FAIL) {
            prev = ph;
            ph   = ph->next;
            break;
          }
          struct of_photon *phscatt = safe_malloc(sizeof(struct of_photon));
          if (get_rand() < Nph_to_track / (nph_per_proc * mpi_nprocs())) {
            phscatt->is_tracked = 1;
          } else {
            phscatt->is_tracked = 0;
          }
          Xtoijk(X, &i, &j, &k);

          // Initialize scattered photon at position of scattering event
          for (int mu = 0; mu < NDIM; mu++) {
            phscatt->X[2][mu]    = X[mu];
            phscatt->Kcov[2][mu] = Kcov[mu];
            phscatt->Kcon[2][mu] = Kcon[mu];
            for (int n = 0; n < 2; n++) {
              phscatt->X[n][mu]    = 0.;
              phscatt->Kcov[n][mu] = 0.;
              phscatt->Kcon[n][mu] = 0.;
            }
          }
          phscatt->t0        = tscatt;
          phscatt->w         = ph->w / bias_scatt[scatt_to_do];
          phscatt->type      = ph->type;
          phscatt->nscatt    = ph->nscatt + 1;
          phscatt->origin[0] = nstep;
          phscatt->origin[1] = i;
          phscatt->origin[2] = j;
          phscatt->origin[3] = k;

          // Split the weight: the parent keeps (1 - 1/bias) of it.
          double wsave = ph->w;
          ph->w        = (1. - 1. / bias_scatt[scatt_to_do]) * ph->w;

          int success = scatter_superphoton(
              P, extra, phscatt, X, Kcov, Kcon, scatt_to_do);
          if (!success) {
#pragma omp atomic
            step_fail++;
            free(phscatt);
            prev = ph;
            ph   = ph->next;
            break;
          }

          // Need to reset K.K
          phscatt->KdotKprev = 0.;
          for (int mu = 0; mu < NDIM; mu++) {
            phscatt->KdotKprev += phscatt->Kcov[2][mu] * phscatt->Kcon[2][mu];
          }

          // Boundary transport cannot use MPI with one zone
          if (N1 == 1)
            i = NG;
          if (N2 == 1)
            j = NG;
          if (N3 == 1)
            k = NG;

          // Apply four-force at interaction site
          for (int mu = 0; mu < NDIM; mu++) {
#pragma omp atomic
            radG[i][j][k][mu] += 1. / (dt * d3x) * phscatt->w * kphys_to_num *
                                 (Kcov[mu] - phscatt->Kcov[2][mu]);
          } // TODO: lepton number unchanged for elastic scattering?

          int nscatt = MY_MIN(ph->nscatt, MAXNSCATT - 1);
#pragma omp atomic
          Jrad[nscatt + 2][i][j][k] -=
              (dt / DTd) *
              ((phscatt->Kcov[2][0] - Kcov[0]) * kphys_to_num * phscatt->w) /
              (ggeom[i][j][CENT].g * dt * d3x);
#pragma omp atomic
          dtau_avg[scatt_to_do + 1][i][j][k] +=
              dtau_scatt[scatt_to_do] * (-ph->Kcov[2][0] * ME * CL * CL) *
              wsave;
#pragma omp atomic
          en_int_avg[scatt_to_do + 1][i][j][k] +=
              (-ph->Kcov[2][0] * ME * CL * CL) * wsave;

          // push phscatt as far as possible
          // should not use separate step and half-step for this push
          status = push_superphoton(phscatt, P, P, cour * dt_light[i][j]);
          if (status == PUSH_FAIL) {
            free(phscatt);
            prev = ph;
            ph   = ph->next;
#pragma omp atomic
            step_fail++;
            break;
          }

#pragma omp atomic
          step_scatt++;
#pragma omp atomic
          step_tot++;
#pragma omp atomic
          Nsc[i][j][k]++;

          // Add scattered superphoton to list
          phscatt->next = ph->next;
          ph->next      = phscatt;

          // Allow for additional scatterings
          tmin = tscatt;
          continue;
        }
      }
    } // ph != NULL
    photon_lists[omp_get_thread_num()] = head;
  } // omp parallel
#endif // ABSORPTION || SCATTERING
  timer_stop(TIMER_INTERACT);
}
#endif // RADIATION
|
divsufsort.c | /*
* divsufsort.c for libdivsufsort
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
//#include "divsufsort_private.h"
#include "/home/z/zd4/SA/libdivsufsort/include/divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif
/*- Private Functions -*/
/* Sorts suffixes of type B*. */
/* Sorts suffixes of type B*.
 *
 * Counts first/second-character occurrences into bucket_A / bucket_B,
 * stores the start positions of the B* suffixes at the tail of SA, sorts
 * the B* substrings with sssort (parallelized over buckets under OpenMP),
 * ranks them with trsort, and finally computes the bucket start/end
 * indices consumed later by construct_SA / construct_BWT.
 * Returns m, the number of type B* suffixes. */
static
saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
               saidx_t *bucket_A, saidx_t *bucket_B,
               saidx_t n) {
  saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
  saidx_t *curbuf;
  saidx_t l;
#endif
  saidx_t i, j, k, t, m, bufsize;
  saint_t c0, c1;
#ifdef _OPENMP
  saint_t d0, d1;
  int tmp;
#endif

  /* Initialize bucket arrays. */
  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }

  /* Count the number of occurrences of the first one or two characters of each
     type A, B and B* suffix. Moreover, store the beginning position of all
     type B* suffixes into the array SA. */
  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
    /* type A suffix. */
    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
    if(0 <= i) {
      /* type B* suffix. */
      ++BUCKET_BSTAR(c0, c1);
      SA[--m] = i;
      /* type B suffix. */
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
        ++BUCKET_B(c0, c1);
      }
    }
  }
  m = n - m;  /* m = number of type B* suffixes found */
  /*
  note:
    A type B* suffix is lexicographically smaller than a type B suffix that
    begins with the same first two characters.
  */

  /* Calculate the index of start/end point of each bucket. */
  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
    t = i + BUCKET_A(c0);
    BUCKET_A(c0) = i + j; /* start point */
    i = t + BUCKET_B(c0, c0);
    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
      j += BUCKET_BSTAR(c0, c1);
      BUCKET_BSTAR(c0, c1) = j; /* end point */
      i += BUCKET_B(c0, c1);
    }
  }

  if(0 < m) {
    /* Sort the type B* suffixes by their first two characters. */
    PAb = SA + n - m; ISAb = SA + m;
    for(i = m - 2; 0 <= i; --i) {
      t = PAb[i], c0 = T[t], c1 = T[t + 1];
      SA[--BUCKET_BSTAR(c0, c1)] = i;
    }
    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;

    /* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
    /* Each thread repeatedly claims the next non-trivial (c0,c1) bucket
       under the critical section, then sorts it with a per-thread buffer. */
    tmp = omp_get_max_threads();
    buf = SA + m, bufsize = (n - (2 * m)) / tmp;
    c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
    {
      tmp = omp_get_thread_num();
      curbuf = buf + tmp * bufsize;
      k = 0;
      for(;;) {
#pragma omp critical(sssort_lock)
        {
          if(0 < (l = j)) {
            d0 = c0, d1 = c1;
            do {
              k = BUCKET_BSTAR(d0, d1);
              if(--d1 <= d0) {
                d1 = ALPHABET_SIZE - 1;
                if(--d0 < 0) { break; }
              }
            } while(((l - k) <= 1) && (0 < (l = k)));
            c0 = d0, c1 = d1, j = k;
          }
        }
        if(l == 0) { break; }
        sssort(T, PAb, SA + k, SA + l,
               curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
      }
    }
#else
    buf = SA + m, bufsize = n - (2 * m);
    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
        i = BUCKET_BSTAR(c0, c1);
        if(1 < (j - i)) {
          sssort(T, PAb, SA + i, SA + j,
                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
        }
      }
    }
#endif

    /* Compute ranks of type B* substrings.  Negative SA entries mark runs
       of substrings equal to their successor (set by sssort). */
    for(i = m - 1; 0 <= i; --i) {
      if(0 <= SA[i]) {
        j = i;
        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
        SA[i + 1] = i - j;
        if(i <= 0) { break; }
      }
      j = i;
      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
      ISAb[SA[i]] = j;
    }

    /* Construct the inverse suffix array of type B* suffixes using trsort. */
    trsort(ISAb, SA, m, 1);

    /* Set the sorted order of type B* suffixes. */
    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
      if(0 <= i) {
        t = i;
        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
        /* ~t flags a B* suffix immediately preceded by another B* suffix */
        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
      }
    }

    /* Calculate the index of start/end point of each bucket. */
    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
      i = BUCKET_A(c0 + 1) - 1;
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
        t = i - BUCKET_B(c0, c1);
        BUCKET_B(c0, c1) = i; /* end point */
        /* Move all type B* suffixes to the correct position. */
        for(i = t, j = BUCKET_BSTAR(c0, c1);
            j <= k;
            --i, --k) { SA[i] = SA[k]; }
      }
      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
      BUCKET_B(c0, c0) = i; /* end point */
    }
  }

  return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
/* Constructs the suffix array by using the sorted order of type B* suffixes.
 * First induce-sorts the type B suffixes right-to-left from the B* order,
 * then induce-sorts the type A suffixes left-to-right.  Negative SA entries
 * (~s) temporarily mark suffixes whose predecessor must not be induced. */
static
void
construct_SA(const sauchar_t *T, saidx_t *SA,
             saidx_t *bucket_A, saidx_t *bucket_B,
             saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          *j = ~s;
          c0 = T[--s];
          /* ~s marks a suffix whose predecessor is type A (not induced) */
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            /* switched to a new destination bucket (c0, c1) */
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else {
          assert(((s == 0) && (T[s] == c1)) || (s < 0));
          *j = ~s;
        }
      }
    }
  }

  /* Construct the suffix array by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else {
      /* restore an entry that was complemented during the induction */
      assert(s < 0);
      *i = ~s;
    }
  }
}
/* Constructs the burrows-wheeler transformed string directly
by using the sorted order of type B* suffixes. */
/* Constructs the burrows-wheeler transformed string directly
   by using the sorted order of type B* suffixes.
   Same induced-sorting structure as construct_SA, but entries whose
   suffix has been fully processed are overwritten with the BWT
   character (the preceding character of the suffix).  Returns the
   primary index (position of the original string's rotation). */
static
saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
              saidx_t *bucket_A, saidx_t *bucket_B,
              saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k, *orig;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          c0 = T[--s];
          /* store the BWT character (complemented) in place of the index */
          *j = ~((saidx_t)c0);
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else if(s != 0) {
          *j = ~s;
#ifndef NDEBUG
        /* only reached in debug builds: s == 0 sanity check */
        } else {
          assert(T[s] == c1);
#endif
        }
      }
    }
  }

  /* Construct the BWTed string by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      *i = c0;
      if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else if(s != 0) {
      *i = ~s;
    } else {
      /* s == 0: this position corresponds to the whole string */
      orig = i;
    }
  }

  return orig - SA;
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
/* Compute the suffix array of T[0..n-1] into SA.
 * Returns 0 on success, -1 on invalid arguments, -2 on allocation failure. */
saint_t
divsufsort(const sauchar_t *T, saidx_t *SA, saidx_t n) {
  /* Trivial and invalid inputs are handled without allocating. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  if(n == 0) { return 0; }
  if(n == 1) { SA[0] = 0; return 0; }
  if(n == 2) {
    saidx_t lo = (T[0] < T[1]);
    SA[lo ^ 1] = 0;
    SA[lo] = 1;
    return 0;
  }

  saidx_t *bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  saidx_t *bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  saint_t err = 0;
  if((bucket_A == NULL) || (bucket_B == NULL)) {
    err = -2;
  } else {
    /* Suffixsort: sort the B* suffixes, then induce the rest. */
    saidx_t m = sort_typeBstar(T, SA, bucket_A, bucket_B, n);
    construct_SA(T, SA, bucket_A, bucket_B, n, m);
  }

  free(bucket_B);
  free(bucket_A);
  return err;
}
/* Burrows-Wheeler transform of T[0..n-1] into U, using A (if non-NULL)
 * as working space.  Returns the primary index + 1 on success, n for
 * trivial inputs, -1 on invalid arguments, -2 on allocation failure. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
  saidx_t *B;
  saidx_t *bucket_A, *bucket_B;
  saidx_t pidx;

  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  if(n <= 1) {
    if(n == 1) { U[0] = T[0]; }
    return n;
  }

  /* Use the caller's workspace when provided, otherwise allocate one. */
  B = A;
  if(B == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  if((B == NULL) || (bucket_A == NULL) || (bucket_B == NULL)) {
    pidx = -2;
  } else {
    saidx_t i;
    /* Burrows-Wheeler Transform. */
    saidx_t m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
    pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);

    /* Copy to output string, skipping the primary-index position. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
    for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
    pidx += 1;
  }

  free(bucket_B);
  free(bucket_A);
  if(A == NULL) { free(B); }
  return pidx;
}
/* Report the library version.  The returned pointer refers to storage with
 * static duration; the caller must not free or modify it. */
const char *
divsufsort_version(void) {
  static const char version_string[] = "2.0.1-14-g5f60d6f";
  return version_string;
}
|
a.35.3.c | /* { dg-do compile } */
void work (int, int);
void
wrong3 (int n)
{
  /* Deliberately ill-formed OpenMP test case (GCC dg testsuite): a
     `single` region closely nested inside a worksharing `for` region is
     not permitted; the dg-error annotation below checks that the
     compiler diagnoses exactly this.  Do not "fix" the nesting. */
#pragma omp parallel default(shared)
  {
    int i;
#pragma omp for
    for (i = 0; i < n; i++)
      {
	/* incorrect nesting of regions */
#pragma omp single	/* { dg-error "may not be closely nested" } */
	work (i, 0);
      }
  }
}
|
pca_mrrr.c | #include <stdio.h>
#include <stdlib.h>
// include files for optimized libraries
#if defined USE_ESSL
#include <essl.h>
#elif defined USE_MKL
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#elif defined USE_LAPACK
#include <cblas.h>
#include <lapacke.h>
#endif
// interface to f2c code
#include "f2c.h"
#include "mrrr.h"
// Thin wrapper around the f2c-translated LAPACK ssytd2 routine
// (symmetric tridiagonal reduction): converts by-value arguments to the
// by-reference Fortran calling convention and returns the INFO code.
static inline int PCA_ssytd2(char uplo, int n, real *a, int lda, real *d, real *e, real *tau)
{
	int status = 0;
	ssytd2_(&uplo, &n, a, &lda, d, e, tau, &status, 1);
	return status;
}
// Wrapper for the f2c-translated MRRR tridiagonal eigensolver
// (pca_sstemr__): marshals by-value scalars into the by-reference
// Fortran calling convention and returns the INFO code.
static inline int PCA_sstemr(char jobz, char range, int n, real *d, real *e, real vl, real vu,
	int il, int iu, int *m, real *w, real *z, int ldz, int nzc, int *isuppz, int *tryrac,
	real *work, int lwork, int *iwork, int liwork)
{
	int status = 0;
	pca_sstemr__(&jobz, &range, &n, d, e, &vl, &vu, &il, &iu, m, w, z, &ldz, &nzc, isuppz,
		tryrac, work, &lwork, iwork, &liwork, &status, 1, 1);
	return status;
}
// Wrapper around the f2c-translated LAPACK sorm2l routine (multiply by
// the orthogonal matrix from a QL factorization): by-reference marshaling
// plus the trailing hidden string-length arguments; returns INFO.
static inline int PCA_sorm2l(char side, char trans, int m, int n, int k, real *a, int lda, real *tau, real *c, int ldc, real *work)
{
	int status = 0;
	sorm2l_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, &status, 1, 1);
	return status;
}
#ifndef NO_PULP
#include "utils.h"
#include "hwTrace.h"
#endif
#if 1
#define ALLOC(t, v, s) t v[s];
#define FREE(v)
#else
#define ALLOC(t, v, s) t *v = malloc(sizeof(t) * (s));
#define FREE(v) free(v);
#endif
// PCA main routine
// input is a column-major matrix with a row for each sample and a column for each variable
// output is a column-major matrix with a row for each sample and a column for each component
// PCA main routine.
//
// input : column-major matrix with a row for each sample and a column for
//         each variable.
// output: column-major matrix with a row for each sample and a column for
//         each component.
//
// Pipeline: mean-center the data (T), form A = T^T * T, tridiagonalize A,
// compute the `components` largest eigenpairs with MRRR, back-transform the
// eigenvectors, then project the centered data onto them.
//
// Bug fix: the conditional-compilation guards used the mixed form
// `#if defined USE_MKL || USE_LAPACK`, which tests USE_LAPACK by *value*
// rather than definedness — malformed if the macro is defined empty, and
// silently 0 if undefined.  All three guards now use defined(...) uniformly.
void PCA_mrrr(int samples, int variables, float *input, int components, float *output)
{
    int lwork = 18 * variables;    // workspace sizes required by SSTEMR
    int liwork = 10 * variables;
    // NOTE(review): with the default ALLOC these are stack VLAs; large
    // problem sizes may overflow the stack — confirm limits on the target.
    ALLOC(real, A, variables * variables);
    ALLOC(real, T, samples * variables);
    ALLOC(real, d, variables);
    ALLOC(real, e, variables);
    ALLOC(real, tau, variables);
    ALLOC(int, isuppz, variables * 2);
    ALLOC(real, w, variables);
    ALLOC(real, Z, variables * variables);
    ALLOC(real, work, lwork);
    ALLOC(int, iwork, liwork);
    // compute and subtract mean (column-wise)
    for (int j = 0; j < variables; j++) {
        real mean = 0.0;
        #pragma omp parallel for reduction(+:mean)
        for (int i = 0; i < samples; i++)
            mean += input[j * samples + i];
        mean /= samples;
        #pragma omp parallel for
        for (int i = 0; i < samples; i++)
            T[j * samples + i] = input[j * samples + i] - mean;
    }
    // compute A = T^T * T (only the upper triangle; A is symmetric)
    for (int j = 0; j < variables; j++)
        for (int i = 0; i <= j; i++) {
            real dot = 0;
            #pragma omp parallel for reduction(+:dot)
            for (int k = 0; k < samples; k++)
                dot += T[j * samples + k] * T[i * samples + k];
            A[i + j * variables] = dot;
        }
    // tridiagonalization
#if defined(USE_MKL) || defined(USE_LAPACK)
    int info = LAPACKE_ssytrd(LAPACK_COL_MAJOR, 'U', variables, A, variables, d, e, tau);
#else
    int info = PCA_ssytd2('U', variables, A, variables, d, e, tau);
#endif
    if (info != 0) {
        printf("Error in SSYTRD/SSYTD2: %i\n", info);
        abort();
    }
    // compute eigenvalues: indices [il, iu] select the `components` largest
    int il = variables - components + 1, iu = variables, m, tryrac = 1;
    real vl = 0.0, vu = 0.0;
    info = PCA_sstemr('V', 'I', variables, d, e, vl, vu, il, iu, &m, w, Z, variables, variables,
        isuppz, &tryrac, work, lwork, iwork, liwork);
    if (info != 0) {
        printf("Error in SSTEMR: %i\n", info);
        abort();
    }
    printf("%d: ", m);
    for (int i = 0; i < m; i++) printf("%d ", (int)w[i]); printf("\n");
    // back-transform the tridiagonal eigenvectors to those of A
#if defined(USE_MKL) || defined(USE_LAPACK)
    info = LAPACKE_sormtr(LAPACK_COL_MAJOR, 'L', 'U', 'N', variables, m, A, variables, tau,
        Z, variables);
#else
    info = PCA_sorm2l('L', 'N', variables - 1, m, variables - 1, A + variables, variables, tau,
        Z, variables, work);
#endif
    if (info != 0) {
        printf("Error in SORMTR/SORM2L: %i\n", info);
        abort();
    }
    // project the centered data onto the eigenvectors: output = T * Z
#if defined(USE_ESSL) || defined(USE_MKL) || defined(USE_LAPACK)
    cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, samples, components, variables,
        1.0, T, samples, Z, variables, 0.0, output, samples);
#else
    #pragma omp parallel for
    for (int i = 0; i < samples; i++)
        for (int j = 0; j < components; j++) {
            real t = 0;
            for (int k = 0; k < variables; k++)
                t += T[i + k * samples] * Z[j * variables + k];
            output[i + j * samples] = t;
        }
#endif
    FREE(T);
    FREE(A);
    FREE(e);
    FREE(d);
    FREE(tau);
    FREE(w);
    FREE(Z);
    FREE(isuppz);
    FREE(work);
    FREE(iwork);
}
|
SimulatorBase.h | /*
Menge Crowd Simulation Framework
Copyright and trademark 2012-17 University of North Carolina at Chapel Hill
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
or
LICENSE.txt in the root of the Menge repository.
Any questions or comments should be sent to the authors menge@cs.unc.edu
<http://gamma.cs.unc.edu/Menge/>
*/
#ifndef __SIMULATOR_BASE_H__
#define __SIMULATOR_BASE_H__
/*!
@file SimulatorBase.h
@brief Contains the SimulatorBase class - the common, generic simulator to work with different
types of agents. It is templated on the Agent type.
*/
#include "MengeCore/Agents/AgentInitializer.h"
#include "MengeCore/Agents/SimulatorInterface.h"
#include "MengeCore/Agents/SpatialQueries/SpatialQuery.h"
#include "MengeCore/Runtime/Utils.h"
#include "MengeCore/mengeCommon.h"
#include <vector>
#if HAVE_OPENMP || _OPENMP
#include <omp.h>
#endif
namespace Menge {
namespace Agents {
/*!
@brief Defines the basic simulator. It is responsible for tracking agents and obstacles as
well as initializing such from files.
*/
template <class Agent>
class SimulatorBase : public SimulatorInterface {
 public:
  /*!
   @brief    Constructs a simulator instance.
   */
  SimulatorBase();

  /*!
   @brief    Destroys a simulator instance.
   */
  ~SimulatorBase();

  /*!
   @brief    Lets the simulator perform a simulation step and updates the two-dimensional
             position (_pos) and two-dimensional velocity of each agent.
   */
  void doStep();

  /*!
   @brief    Initialize spatial query structure.
   */
  virtual bool initSpatialQuery();

  /*!
   @brief    After all agents and all obstacles have been added to the scene does the work to finish
             preparing the simulation to be run.

   This work is performed when the simulator is done being initialized. If a particular new
   pedestrian simulator requires particular finalization work, this function should be sub-classed
   and the parent class's version of the function should be explicitly called before any additional
   work is performed.
   */
  virtual void finalize();

  /*!
   @brief    Accessor for agents.

   @param    agentNo   The number of the agent who is to be retrieved. This is *not* the
                       same as the agent identifier. It is merely the local index of the agent
                       in the simulator's local store.
   @returns  A pointer to the agent.
   */
  virtual BaseAgent* getAgent(size_t agentNo) { return &_agents[agentNo]; }

  /*!
   @brief    Const accessor for agents.

   @param    agentNo   The number of the agent who is to be retrieved. This is *not* the same
                       as the agent identifier. It is merely the local index of the agent in the
                       simulator's local store.
   @returns  A pointer to the agent.
   */
  virtual const BaseAgent* getAgent(size_t agentNo) const { return &_agents[agentNo]; }

  /*!
   @brief    Add an agent with specified position to the simulator whose properties are defined by
             the given agent initializer.

   It uses the agent initializer to define the values of the remaining agent parameters.

   @param    pos         The 2d vector representing the agent's position.
   @param    agentInit   The AgentInitializer necessary to parse AgentSet properties.
   @returns  A pointer to the agent (if initialization was successful) or NULL if failed.
   */
  virtual BaseAgent* addAgent(const Vector2& pos, AgentInitializer* agentInit);

  /*!
   @brief    Returns the count of agents in the simulation.
   @returns  The count of agents in the simulation.
   */
  virtual size_t getNumAgents() const { return _agents.size(); }

  /*!
   @brief    Reports if there are non-common Experiment parameters that this simulator requires in
             the XML file.
   @returns  By default, the simulator base ONLY uses common parameters. Always returns false.
   */
  virtual bool hasExpTarget() { return false; }

  /*!
   @brief    Reports if the given Experiment attribute tag name belongs to this simulator.
   @param    tagName   The name of the candidate experiment XML tag.
   @returns  By default, the simulator base ONLY uses common parameters. Always returns false.
   */
  virtual bool isExpTarget(const std::string& tagName) { return false; }

  /*!
   @brief    Given an Experiment parameter name and value, sets the appropriate simulator
             parameter.

   // TODO: Define the conditions of success/failure.

   @param    paramName   A string containing the parameter name for the experiment.
   @param    value       A string containing the value for the parameter.
   @returns  True if the parameter was successfully set, false otherwise.
   */
  virtual bool setExpParam(const std::string& paramName,
                           const std::string& value) throw(XMLParamException);

 protected:
  /*!
   @brief    Computes the neighbors for the given agent.
   @param    agent   The agent whose neighbors are to be computed.
   */
  void computeNeighbors(Agent* agent);

  /*!
   @brief    The collection of agents in the simulation
   */
  std::vector<Agent> _agents;
};
////////////////////////////////////////////////////////////////
// Implementation of SimulatorBase
////////////////////////////////////////////////////////////////
// Constructor: starts with no agents; all other state lives in SimulatorInterface.
template <class Agent>
SimulatorBase<Agent>::SimulatorBase() : SimulatorInterface(), _agents() {}
////////////////////////////////////////////////////////////////
// Destructor.  NOTE: clearing the vector here is redundant (its own
// destructor does it) but harmless.
template <class Agent>
SimulatorBase<Agent>::~SimulatorBase() {
  _agents.clear();
}
////////////////////////////////////////////////////////////////
// Advances the simulation by one TIME_STEP in two OpenMP-parallel phases:
// (1) compute each agent's new velocity, (2) commit state updates.  The
// split keeps phase 1 reading a consistent snapshot of all agents while new
// velocities are being computed.
template <class Agent>
void SimulatorBase<Agent>::doStep() {
  assert(_spatialQuery != 0x0 && "Can't run without a spatial query instance defined");
  // TODO: want to know
  _spatialQuery->updateAgents();
  int AGT_COUNT = static_cast<int>(_agents.size());
  // Phase 1: neighbor queries + velocity computation per agent.
  #pragma omp parallel for
  for (int i = 0; i < AGT_COUNT; ++i) {
    computeNeighbors(&(_agents[i]));
    _agents[i].computeNewVelocity();
  }
  // Phase 2: apply the computed velocities to positions.
  #pragma omp parallel for
  for (int i = 0; i < AGT_COUNT; ++i) {
    _agents[i].update(TIME_STEP);
  }
  _globalTime += TIME_STEP;
}
////////////////////////////////////////////////////////////////
// Registers all agents (as BaseAgent pointers) with the spatial query
// structure and lets it pre-process the obstacle set.
// Always returns true in this base implementation.
template <class Agent>
bool SimulatorBase<Agent>::initSpatialQuery() {
  assert(_spatialQuery != 0x0 && "Can't run without a spatial query instance defined");
  const size_t AGT_COUNT = _agents.size();
  // Build a pointer view of the agent store; the query keeps its own copy
  // of this vector of pointers.
  std::vector<BaseAgent*> agtPointers(AGT_COUNT);
  for (size_t a = 0; a < AGT_COUNT; ++a) {
    agtPointers[a] = &_agents[a];
  }
  _spatialQuery->setAgents(agtPointers);
  _spatialQuery->processObstacles();
  return true;
}
////////////////////////////////////////////////////////////////
// Finishes simulator setup: runs the parent finalization first, then gives
// every agent a chance to initialize itself.
template <class Agent>
void SimulatorBase<Agent>::finalize() {
  SimulatorInterface::finalize();
  // initialize agents
  for (size_t i = 0; i < _agents.size(); ++i) {
    _agents[i].initialize();
  }
}
////////////////////////////////////////////////////////////////
// Adds a new agent at `pos`, assigning it the next sequential id and letting
// `agentInit` populate the remaining properties.  Returns a pointer into the
// agent store on success, NULL if property initialization failed.
template <class Agent>
BaseAgent* SimulatorBase<Agent>::addAgent(const Vector2& pos, AgentInitializer* agentInit) {
  Agent newAgent;
  newAgent._pos = pos;
  newAgent._id = _agents.size();
  const bool propsOk = agentInit->setProperties(&newAgent);
  if (!propsOk) {
    logger << Logger::ERR_MSG << "Error initializing agent " << newAgent._id << "\n";
    return 0x0;
  }
  _agents.push_back(newAgent);
  return &_agents.back();
}
////////////////////////////////////////////////////////////////
// Sets a common Experiment parameter by name.  Only "time_step" is handled
// here; any other name returns false so subclasses/callers can try elsewhere.
// Throws XMLParamException when the value cannot be parsed as a float.
template <class Agent>
bool SimulatorBase<Agent>::setExpParam(const std::string& paramName,
                                       const std::string& value) throw(XMLParamException) {
  if (paramName == "time_step") {
    try {
      LOGICAL_TIME_STEP = toFloat(value);
    } catch (const UtilException&) {  // fix: catch by const reference (was by value)
      throw XMLParamException(
          std::string("Common parameters \"time_step\" value couldn't be converted "
                      "to a float. Found the value: ") +
          value);
    }
  } else {
    return false;  // not a parameter this base simulator owns
  }
  return true;
}
////////////////////////////////////////////////////////////////
// Populates the agent's neighbor sets: resets its per-query state, collects
// nearby obstacles, then nearby agents (the latter only if the agent asks
// for neighbors at all).
template <class Agent>
void SimulatorBase<Agent>::computeNeighbors(Agent* agent) {
  // obstacles
  agent->startQuery();
  _spatialQuery->obstacleQuery(agent);
  // agents — skip entirely when the agent ignores neighbors
  if (agent->_maxNeighbors > 0) {
    _spatialQuery->agentQuery(agent);
  }
}
} // namespace Agents
} // namespace Menge
#endif // __SIMULATOR_BASE_H__
|
erotima_2a-i.c | #include <stdio.h>
#include <math.h>
#include <getopt.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#include <string.h>
#define MIN_NUM_OF_NEURONS (1L)
#define DEF_NUM_OF_NEURONS (1000L)
#define MIN_NUM_OF_NEIGHBORS (0L)
#define DEF_NUM_OF_NEIGHBORS (300L)
#define DEF_DT (1.0e-04)
#define DEF_MU (1.0)
#define DEF_UTH (0.98)
#define DEF_S_MIN (0.7)
#define DEF_S_MAX (0.7)
#define DEF_SIM_TIME (20L)
#define DEF_TTRANSIENT (-1L)
void print_thread();
/* getopt_long option table: each long option maps to the short key consumed
   by the switch in main(); every option takes a required argument. */
static struct option long_options[] =
{
    {"dt", required_argument, 0, 'a'},        /* integration time step */
    {"mu", required_argument, 0, 'b'},        /* model drive constant */
    {"uth", required_argument, 0, 'c'},       /* firing threshold */
    {"time", required_argument, 0, 'd'},      /* simulated seconds */
    {"transient", required_argument, 0, 'e'}, /* transient to discard */
    {"s_min", required_argument, 0, 'f'},     /* coupling strength range */
    {"s_max", required_argument, 0, 'g'},
    {"n", required_argument, 0, 'n'},         /* number of neurons */
    {"r", required_argument, 0, 'r'},         /* one-sided neighbor count */
    {0, 0, 0, 0}
};
/*
 * Simulates n coupled integrate-and-fire-type neurons on a ring; each neuron
 * is connected to its r nearest neighbors on both sides with random coupling
 * strengths drawn from [s_min, s_max].  The temporal loop is parallelized
 * with OpenMP.  Membrane potentials are periodically written to
 * "spacetime.out" and spike-rate estimates (omega) to "omega.out".
 *
 * Fixes vs. original:
 *  - the initialization loop over u[] was hand-unrolled in steps of 10 and
 *    wrote past the end of the array whenever n was not a multiple of 10;
 *  - `step` was an int, so `i * n` overflowed for n > ~46340;
 *  - the calloc'd buffers are now freed before exit.
 */
int main(int argc, char *argv[])
{
    FILE *output1, *output2;
    long n, r;                 /* neurons / one-sided neighbor count */
    long i, j;
    long it;                   /* time-step index */
    double divide;             /* coupling normalization: 2*r */
    double dt;
    double tstep;
    long ntstep;               /* time steps per simulated second */
    long sim_time;
    long ttransient;           /* steps discarded before counting spikes */
    long itime;                /* total number of time steps */
    double uth;
    double mu;
    double s_min;
    double s_max;
    double *u, *uplus, *sigma, *omega, *omega1, *temp_u;
    double time;
    struct timeval global_start, global_end, IO_start, IO_end;
    double global_usec, IO_usec = 0.0;
    int c, option_index;
    char *end_ptr;

    /* Defaults; each may be overridden on the command line. */
    n = DEF_NUM_OF_NEURONS;
    r = DEF_NUM_OF_NEIGHBORS;
    dt = DEF_DT;
    mu = DEF_MU;
    uth = DEF_UTH;
    s_min = DEF_S_MIN;
    s_max = DEF_S_MAX;
    sim_time = DEF_SIM_TIME;
    ttransient = DEF_TTRANSIENT;

    /* Parse and validate command-line options. */
    while (1) {
        c = getopt_long (argc, argv, "+n:r:", long_options, &option_index);
        if (c == -1) {
            break;
        }
        switch (c) {
        case 'a':
            dt = strtod(optarg, &end_ptr);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (dt <= 0.0) {
                printf("Option \"%s\": \"dt\" must be larger than zero.\n", long_options[option_index].name);
                exit(1);
            }
            break;
        case 'b':
            mu = strtod(optarg, &end_ptr);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (mu <= 0.0) {
                printf("Option \"%s\": \"mu\" must be larger than zero.\n", long_options[option_index].name);
                exit(1);
            }
            break;
        case 'c':
            uth = strtod(optarg, &end_ptr);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (uth <= 0.0) {
                printf("Option \"%s\": \"uth\" must be larger than zero.\n", long_options[option_index].name);
                exit(1);
            }
            break;
        case 'd':
            sim_time = strtol(optarg, &end_ptr, 10);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (sim_time < 1) {
                printf("Option \"%s\": Total simulation time must be larger than zero.\n", long_options[option_index].name);
                exit(1);
            }
            break;
        case 'e':
            ttransient = strtol(optarg, &end_ptr, 10);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (ttransient < 0) {
                printf("Option \"%s\": \"ttransient\" must be larger or equal than zero.\n", long_options[option_index].name);
                exit(1);
            }
            break;
        case 'f':
            s_min = strtod(optarg, &end_ptr);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (s_min <= 0.0) {
                printf("Option \"%s\": \"s_min\" must be larger than zero.\n", long_options[option_index].name);
                exit(1);
            }
            break;
        case 'g':
            s_max = strtod(optarg, &end_ptr);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (s_max <= 0.0) {
                printf("Option \"%s\": \"s_max\" must be larger than zero.\n", long_options[option_index].name);
                exit(1);
            }
            break;
        case 'n':
            n = strtol(optarg, &end_ptr, 10);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (n < MIN_NUM_OF_NEURONS) {
                printf("Option \"%s\": Number of neurons must be at least %ld.\n", long_options[option_index].name, MIN_NUM_OF_NEURONS);
                exit(1);
            }
            break;
        case 'r':
            r = strtol(optarg, &end_ptr, 10);
            if (*end_ptr != '\0') {
                printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                exit(1);
            }
            if (r < MIN_NUM_OF_NEIGHBORS) {
                printf("Option \"%s\": Number of neighbors must be at least %ld.\n", long_options[option_index].name, MIN_NUM_OF_NEIGHBORS);
                exit(1);
            }
            break;
        case '?':
        default:
            exit(1);
            break;
        }
    }
    if (optind != argc) {
        printf("Unknown option \"%s\".\n", argv[optind]);
        exit(1);
    }
    if (2 * r + 1 > n) {
        printf("Total number of neighbors and reference neuron (2 * %ld + 1 = %ld) cannot exceed number of neurons (%ld).\n", r, 2 * r + 1, n);
        exit(1);
    }
    if (s_min > s_max) {
        printf("s_min (%17.15f) must be smaller or equal than s_max (%17.15f).\n", s_min, s_max);
        exit(1);
    }

    /* Derived quantities. */
    divide = (double)(2 * r);
    tstep = 1.0 / dt;
    ntstep = (long)tstep;                       /* steps per simulated second */
    if (ttransient == DEF_TTRANSIENT) {
        ttransient = (sim_time * ntstep) / 2;   /* default: half of the run */
    } else {
        ttransient *= ntstep;                   /* seconds -> steps */
    }
    itime = sim_time * ntstep;

    printf("Running simulation with following parameters:\n");
    printf(" Number of neurons : %ld\n", n);
    printf(" Numger of neighbours: %ld\n", r);
    printf(" Simulation time : %ld seconds (%ld time steps)\n", sim_time, itime);
    printf(" Transient time : %ld seconds (%ld time steps)\n", ttransient / ntstep, ttransient);
    printf(" dt : %.1e seconds \n", dt);
    printf(" mu : %17.15f\n", mu);
    printf(" uth : %17.15f\n", uth);
    printf(" s_min : %17.15f\n", s_min);
    printf(" s_max : %17.15f\n", s_max);

    output1 = fopen("spacetime.out", "w");
    if (output1 == NULL) {
        printf("Could not open file \"spacetime.out\"");
        exit(1);
    }
    output2 = fopen("omega.out", "w");
    if (output2 == NULL) {
        printf("Could not open file \"omega.out\"");
        exit(1);
    }

    u = (double *)calloc(n, sizeof(double));
    if (u == NULL) {
        printf("Could not allocate memory for \"u\".\n");
        exit(1);
    }
    uplus = (double *)calloc(n, sizeof(double));
    if (uplus == NULL) {
        printf("Could not allocate memory for \"uplus\".\n");
        exit(1);
    }
    sigma = (double *)calloc(n * n, sizeof(double));
    if (sigma == NULL) {
        printf("Could not allocate memory for \"sigma\".\n");
        exit(1);
    }
    omega = (double *)calloc(n, sizeof(double));
    if (omega == NULL) {
        printf("Could not allocate memory for \"omega\".\n");
        exit(1);
    }
    omega1 = (double *)calloc(n, sizeof(double));
    if (omega1 == NULL) {
        printf("Could not allocate memory for \"omega1\".\n");
        exit(1);
    }
    /* NOTE(review): temp_u is allocated but only referenced by commented-out
       code; kept for now to preserve behavior of possible future use. */
    temp_u = (double *)calloc(n, sizeof(double));
    if (temp_u == NULL) {
        printf("Could not allocate memory for \"temp_u\".\n");
        exit(1);
    }

    /* Random initial membrane potentials.
       Fix: the original loop was hand-unrolled in steps of 10 and wrote past
       the end of u[] whenever n was not a multiple of 10.  The drand48()
       call sequence and printed output are unchanged for valid n. */
    for (i = 0; i < n; i++) {
        u[i] = drand48();
        printf("%ld\t%f\n", i, u[i]);
    }

    /*
       Construct the ring connectivity matrix sigma[n][n]: neuron i couples
       to the r neighbors on each side (with wrap-around at the edges).
    */
    for (i = 0; i < r; i++) {
        for (j = 0; j < i + r + 1; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
        for (j = n - r + i; j < n; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
    }
    for (i = r; i < n - r; i++) {
        for (j = 0; j < 2 * r + 1; j++) {
            sigma[i * n + j + i - r] = s_min + (s_max - s_min) * drand48();
        }
    }
    for (i = n - r; i < n; i++) {
        for (j = 0; j < i - n + r + 1; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
        for (j = i - r; j < n; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
    }
#if 0
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            printf("%4.1f", sigma[i * n + j]);
        }
        printf("\n");
    }
#endif

    /*
       Temporal iteration.
    */
    gettimeofday(&global_start, NULL);
    double sum;
    double temp;
    long step;   /* fix: was int; i * n overflows int for n > ~46340 */
    double semi_sum = 0.0;
    omp_set_dynamic(0); // Explicitly disable dynamic teams
    /* NOTE(review): semi_sum accumulates only row 0 of sigma; all rows share
       this sum only when s_min == s_max (every entry is then identical) —
       confirm intent for heterogeneous coupling. */
    for (i = 0; i < n; i++) {
        semi_sum += sigma[i];
    }
    #pragma omp parallel private(it,i,j,sum,temp,step) firstprivate(n,sigma,dt,mu,semi_sum)
    {
        for (it = 0; it < itime; it++) {
            /* Euler step: uplus[i] = u[i] + dt*(mu - u[i]) + coupling term;
               a neuron exceeding uth fires (reset to 0) and, past the
               transient, its spike count omega1[i] is incremented. */
            #pragma omp for schedule(static,8)
            for (i = 0; i < n; i++) {
                sum = 0.0;
                #pragma omp atomic read
                temp = u[i];
                step = i * n;
                #pragma omp atomic write
                uplus[i] = temp + dt * (mu - temp);
                for (j = 0; j < n; j++) {
                    sum += sigma[step + j] * u[j];
                }
                #pragma omp atomic update
                uplus[i] += dt * (sum - semi_sum * temp) / divide;
                if (uplus[i] > uth) {
                    #pragma omp atomic write
                    uplus[i] = 0.0;
                    if (it >= ttransient) {
                        #pragma omp atomic
                        omega1[i] += 1.0;
                    }
                }
            }
            #pragma omp barrier
            /* Commit the new state; single has an implied barrier. */
            #pragma omp single
            memcpy(u, uplus, n * sizeof *u);
#if !defined(ALL_RESULTS)
            if (it % ntstep == 0) {
#endif
            /* I/O by the master thread only; timed separately so the
               reported calculation time excludes it. */
            #pragma omp master
            {
                printf("Time is %ld\n", it);
                gettimeofday(&IO_start, NULL);
                fprintf(output1, "%ld\t", it);
                for (i = 0; i < n; i++) {
                    fprintf(output1, "%19.15f", u[i]);
                }
                fprintf(output1, "\n");
                time = (double)it * dt;
                fprintf(output2, "%ld\t", it);
                for (i = 0; i < n; i++) {
                    omega[i] = 2.0 * M_PI * omega1[i] / (time - ttransient * dt);
                    fprintf(output2, "%19.15f", omega[i]);
                }
                fprintf(output2, "\n");
                gettimeofday(&IO_end, NULL);
                IO_usec += ((IO_end.tv_sec - IO_start.tv_sec) * 1000000.0 + (IO_end.tv_usec - IO_start.tv_usec));
            }//master end
#if !defined(ALL_RESULTS)
            }
#endif
        }
    }//omp parallel
    gettimeofday(&global_end, NULL);
    global_usec = ((global_end.tv_sec - global_start.tv_sec) * 1000000.0 + (global_end.tv_usec - global_start.tv_usec));
    printf("Time for calculations = %13.6f sec\n", (global_usec - IO_usec) / 1000000.0);
    printf("Time for I/O = %13.6f sec\n", IO_usec / 1000000.0);
    printf("Total execution time = %13.6f sec\n", global_usec / 1000000.0);
    fclose(output1);
    fclose(output2);
    /* Release buffers (the original leaked them; harmless at exit but tidy). */
    free(u);
    free(uplus);
    free(sigma);
    free(omega);
    free(omega1);
    free(temp_u);
    return 0;
}
|
random_draw.c | /*
Copyright (c) 2015, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/**********************************************************************
Name: LCG
Purpose: Provide a mixed Linear Congruential Generator of pseudo-random
numbers with a period of 2^64, plus tools to jump ahead in a sequence
of such generated numbers. For details, see individual functions.
Functions: LCG_next: a new pseudo-randon number
LCG_get_chunk: return subset of an interval of natural numbers
LCG_init: initialize the generator
LCG_jump: jump ahead into a sequence of pseudo-random numbers
random_draw:
Notes: LCG_init must be called by each thread or rank before any jump
into a sequence of pseudo-random numbers is made
History: Written by Rob Van der Wijngaart, December 2015
**********************************************************************/
/*#include <par-res-kern_general.h>*/
#include <math.h>
#include <stdint.h>
#include <inttypes.h>
#include <limits.h>
#include "random_draw.h"
#define NMAX 64
/* Generator state and constants (Knuth's MMIX multiplier/increment).
   LCG_A[i] caches a^(2^i) mod 2^64; filled by LCG_init. */
static uint64_t LCG_a = 6364136223846793005;
static uint64_t LCG_c = 1442695040888963407;
static uint64_t LCG_seed_init = 27182818285; //used to (re)set seed
static uint64_t LCG_seed = 27182818285;
static uint64_t LCG_A[NMAX];
/* Bug fix: the standard OpenMP feature-test macro is _OPENMP, not __OPENMP;
   with the old spelling the generator state was never made threadprivate. */
#ifdef _OPENMP
#pragma omp threadprivate (LCG_a, LCG_c, LCG_seed, LCG_A)
#endif
/* For the index range 0 .. size-1, compute the inclusive sub-range
   [*start, *end] owned by thread `tid` out of `nthreads`.  The first
   (size % nthreads) threads each receive one extra element. */
void LCG_get_chunk(uint64_t *start, uint64_t *end, int tid, int nthreads, uint64_t size) {
  uint64_t base = size / nthreads;
  uint64_t extra = size % nthreads;
  uint64_t t = (uint64_t)tid;
  if (t < extra) {
    /* base+1 elements */
    *start = t * (base + 1);
    *end = *start + base;
  } else {
    /* base elements, offset past the enlarged chunks */
    *start = extra * (base + 1) + (t - extra) * base;
    *end = *start + base - 1;
  }
}
/* Strip the most significant set bit: returns x minus the largest power of
   two not exceeding x (0 maps to 0). */
static uint64_t tail(uint64_t x) {
  uint64_t msb, v;
  if (x == 0) {
    return 0;
  }
  msb = 1;
  for (v = x; v > 1; v >>= 1) {
    msb <<= 1;
  }
  return x - msb;
}
/* Sum(i=1,2^k) a^i, mod 2^64 by unsigned wraparound.  Built recursively
   from the table LCG_A[j] = a^(2^j) (LCG_init must have run first):
   S(k) = S(k-1) * (1 + a^(2^(k-1))). */
static uint64_t SUMPOWER(int k) {
  if (!k) return LCG_a;
  return SUMPOWER(k-1)*(1+LCG_A[k-1]);
}
/* Floor of log2(n); returns 0 for n == 0 or n == 1. */
static int LOG(uint64_t n) {
  int bits = 0;
  for (; n > 1; n >>= 1) {
    bits++;
  }
  return bits;
}
/* Sum(i=1,n) a^i, with n arbitrary (mod 2^64).  Splits n as the largest
   power of two (HEAD via SUMPOWER) plus a remainder (TAILn via tail),
   recursing on the remainder scaled by a^(2^LOG(n)). */
static uint64_t SUMK(uint64_t n) {
  uint64_t HEAD;
  uint64_t TAILn;
  if (n==0) return(0);
  HEAD = SUMPOWER(LOG(n));
  TAILn = tail(n);
  if (TAILn==0) return(HEAD);
  return(HEAD + (LCG_A[LOG(n)])*SUMK(TAILn));
}
/* Advance the generator one step and return the new state reduced mod
   `bound`.  NOTE(review): the modulo introduces a small bias unless bound
   divides 2^64 — presumably acceptable for this benchmark; confirm. */
uint64_t LCG_next(uint64_t bound) {
  LCG_seed = LCG_a*LCG_seed + LCG_c;
  return (LCG_seed%bound);
}
/* Reset the seed and precompute LCG_A[i] = a^(2^i) mod 2^64 by repeated
   squaring.  Must be called (per thread/rank) before any LCG_jump. */
void LCG_init(void){
  int i;
  LCG_seed = LCG_seed_init;
  LCG_A[0] = LCG_a;
  for (i=1; i<NMAX; i++) {
    LCG_A[i] = LCG_A[i-1]*LCG_A[i-1];
  }
  return;
}
/* Reposition the generator as if LCG_next(bound) had been called m times
   from the initial seed, in O(log m) work: uses the binary expansion of m
   and the table LCG_A[i] = a^(2^i).  Requires a prior LCG_init. */
void LCG_jump(uint64_t m, uint64_t bound){
  int i, index, LCG_power[NMAX];
  uint64_t mm, s_part;
  for (i=0; i<NMAX; i++) LCG_power[i] = 0;
  LCG_seed = LCG_seed_init;
  /* Catch two special cases */
  switch (m) {
  case 0: return;
  case 1: LCG_next(bound); return;
  }
  /* Record the bits of m. */
  mm = m;
  index = 0;
  while (mm) {
    LCG_power[index++] = mm&1;
    mm >>=1;
  }
  /* seed' = a^m * seed + (1 + Sum(i=1..m-1) a^i) * c, all mod 2^64. */
  s_part = 1;
  for (i=0; i<index; i++) if (LCG_power[i]) s_part *= LCG_A[i];
  LCG_seed = s_part*LCG_seed + (SUMK(m-1)+1)*LCG_c;
  return;
}
/* Draw a pseudo-random non-negative integer with mean ~mu.
   For mu >= 1: Box-Muller transform yields a Gaussian deviate with
   sigma = 0.15*mu, rounded to the nearest integer.
   For mu < 1: a Bernoulli draw returning 1 with probability ~mu.
   Both branches consume exactly two LCG_next calls, keeping the random
   stream position independent of mu.
   NOTE(review): z1 (the second Box-Muller deviate) is computed but never
   read — dead store kept for stream/source compatibility.  u0 == 0 would
   make log(u0) infinite; presumably deemed improbable — confirm. */
uint64_t random_draw(double mu)
{
  const double   two_pi = 2.0*3.14159265358979323846;
  const uint64_t rand_max = ULLONG_MAX;
  const double   rand_div = 1.0/(double)ULLONG_MAX;
  const uint64_t denominator = UINT_MAX;
  static double z0, z1;
  double u0, u1, sigma;
  static uint64_t numerator;
  static uint64_t i0, i1;
  if (mu>=1.0) {
    sigma = mu*0.15;
    u0 = LCG_next(rand_max) * rand_div;
    u1 = LCG_next(rand_max) * rand_div;
    z0 = sqrt(-2.0 * log(u0)) * cos(two_pi * u1);
    z1 = sqrt(-2.0 * log(u0)) * sin(two_pi * u1);
    return (uint64_t) (z0 * sigma + mu+0.5);
  }
  else {
    /* we need to pick two integers whose quotient approximates mu; set one to UINT_MAX */
    numerator = (uint32_t) (mu*(double)denominator);
    i0 = LCG_next(denominator); /* don't use this value, but must call LCG_next twice */
    i1 = LCG_next(denominator);
    return ((uint64_t)(i1<=numerator));
  }
}
|
sormqr.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zunmqr.c, normal z -> s, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_unmqr
*
* Overwrites the general complex m-by-n matrix C with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * C C * Q
* trans = PlasmaTrans Q^T * C C * Q^T
*
* where Q is an orthogonal (or orthogonal) matrix defined as the product of k
* elementary reflectors
*
* Q = H(1) H(2) . . . H(k)
*
* as returned by plasma_sgeqrf. Q is of order m if side = PlasmaLeft
* and of order n if side = PlasmaRight.
*
*******************************************************************************
*
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^T from the left;
* - PlasmaRight: apply Q or Q^T from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: No transpose, apply Q;
* - PlasmaTrans: Transpose, apply Q^T.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] k
* The number of elementary reflectors whose product defines
* the matrix Q.
* If side == PlasmaLeft, m >= k >= 0.
* If side == PlasmaRight, n >= k >= 0.
*
* @param[in] pA
* Details of the QR factorization of the original matrix A as returned
* by plasma_sgeqrf.
*
* @param[in] lda
* The leading dimension of the array A.
* If side == PlasmaLeft, lda >= max(1,m).
* If side == PlasmaRight, lda >= max(1,n).
*
* @param[in] T
* Auxiliary factorization data, computed by plasma_sgeqrf.
*
* @param[in,out] pC
* On entry, pointer to the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_sormqr
* @sa plasma_cunmqr
* @sa plasma_dormqr
* @sa plasma_sormqr
* @sa plasma_sgeqrf
*
******************************************************************************/
int plasma_sormqr(plasma_enum_t side, plasma_enum_t trans,
                  int m, int n, int k,
                  float *pA, int lda,
                  plasma_desc_t T,
                  float *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    // am is the order of Q: m when applied from the left, n from the right.
    int am;
    if (side == PlasmaLeft) {
        am = m;
    }
    else {
        am = n;
    }
    if ((k < 0) || (k > am)) {
        plasma_error("illegal value of k");
        return -5;
    }
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -10;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geqrf(plasma, PlasmaRealFloat, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        am, k, 0, 0, am, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmqr: work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Bug fix: release the tile descriptors created above; the original
        // leaked both A and C on this error path.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_sormqr(side, trans,
                          A, T, C, work,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_unmqr
*
* Non-blocking tile version of plasma_sormqr().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^T from the left;
* - PlasmaRight: apply Q or Q^T from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: apply Q;
* - PlasmaTrans: apply Q^T.
*
* @param[in] A
* Descriptor of matrix A stored in the tile layout.
* Details of the QR factorization of the original matrix A as returned
* by plasma_sgeqrf.
*
* @param[in] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by plasma_sgeqrf.
*
* @param[in,out] C
* Descriptor of matrix C.
* On entry, the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For multiplication by Q contains preallocated space for work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_sormqr
* @sa plasma_omp_cunmqr
* @sa plasma_omp_dormqr
* @sa plasma_omp_zunmqr
* @sa plasma_omp_sgeqrf
*
******************************************************************************/
void plasma_omp_sormqr(plasma_enum_t side, plasma_enum_t trans,
                       plasma_desc_t A, plasma_desc_t T, plasma_desc_t C,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request before anything else: every other error
    // path below reports through plasma_request_fail(sequence, request, ...),
    // so neither pointer may be NULL by the time it is used.  (Previously
    // these checks came last, after both pointers had already been passed
    // to plasma_request_fail, and the NULL branches themselves dereferenced
    // the NULL pointer inside plasma_request_fail.)
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("invalid value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("invalid value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0)
        return;

    // Call the parallel function.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_psormqr_tree(side, trans,
                            A, T, C,
                            work, sequence, request);
    }
    else {
        plasma_psormqr(side, trans,
                       A, T, C,
                       work, sequence, request);
    }
}
|
GB_binop__times_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__times_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__times_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__times_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint8)
// A*D function (colscale): GB (_AxD__times_uint8)
// D*A function (rowscale): GB (_DxB__times_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__times_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__times_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint8)
// C=scalar+B GB (_bind1st__times_uint8)
// C=scalar+B' GB (_bind1st_tran__times_uint8)
// C=A+scalar GB (_bind2nd__times_uint8)
// C=A'+scalar GB (_bind2nd_tran__times_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C += A+B where all three matrices are dense; the loop body is
    // supplied by the shared template, specialized via the GB_* macros
    // defined at the top of this (auto-generated) file.
    // NOTE(review): unlike the other kernels here this one has no
    // GB_DISABLE guard -- presumably the caller checks; confirm against
    // the generator.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where all three matrices are dense.
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B: accumulate a sparse matrix into a dense matrix, using the
    // task slicing of B computed by the caller (GB_ek_slice).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a scalar into a dense matrix.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returns; this
    // duplicate return is generator boilerplate and harmless.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: column scale, where D is a diagonal matrix.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is written directly; the template iterates over A's pattern.
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    // C = D*B: row scale, where D is a diagonal matrix.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B (set union of patterns).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here, allocated/freed inside the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult method 08: C=A.*B where C is sparse/hypersparse
    // (set intersection of patterns).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,             // sparse/hypersparse
    const GrB_Matrix B,             // bitmap/full
    const bool flipxy,              // if true, compute z = fmult(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult method 02: C<#> = A.*B, A sparse/hyper, B bitmap/full.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (TIMES is commutative, so GB_BINOP_FLIP is 0 for this file.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,             // sparse/hypersparse mask
    const bool Mask_struct,
    const GrB_Matrix A,             // bitmap/full
    const GrB_Matrix B,             // bitmap/full
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult method 04: C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_uint8)
(
    GrB_Matrix C,                   // bitmap result
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = x * Bx [p], for each entry present in B
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            uint8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x * bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = Ax [p] * y, for each entry present in A
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            uint8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij * y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A and apply z = x * aij via GB_CAST_OP.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of the file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): transpose A and apply z = aij * y via GB_CAST_OP.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
NETLMv2_fmt_plug.c | /*
* NETLMv2_fmt.c -- LMv2 Challenge/Response
*
* Written by JoMo-Kun <jmk at foofus.net> in 2008
* and placed in the public domain.
*
* Performance fixes, OMP and utf-8 support by magnum 2010-2011
*
* This algorithm is designed for performing brute-force cracking of the LMv2
* challenge/response sets exchanged during network-based authentication
* attempts [1]. The captured challenge/response set from these attempts
* should be stored using the following format:
*
* USERNAME::DOMAIN:SERVER CHALLENGE:LMv2 RESPONSE:CLIENT CHALLENGE
*
* For example:
* Administrator::WORKGROUP:1122334455667788:6759A5A7EFB25452911DE7DE8296A0D8:F503236B200A5B3A
*
* It should be noted that an LMv2 authentication response is not the same as an LM
* password hash, which can be extracted using tools such as FgDump [2]. In
* fact, a NTLM hash and not a LM hash is used within the LMv2 algorithm. LMv2
* challenge/response authentication typically takes place when the GPO
* "Network Security: LAN Manager authentication level" is configured to a setting
* that enforces the use of NTLMv2, such as "Send NTLMv2 response only\refuse
* LM & NTLM."
*
* LMv2 responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theLmv2Response
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETLMv2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETLMv2);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "stdint.h"
#include "md5.h"
#include "hmacmd5.h"
#include "byteorder.h"
#include "memdbg.h"
#ifndef uchar
#define uchar unsigned char
#endif
#define FORMAT_LABEL "netlmv2"
#define FORMAT_NAME "LMv2 C/R"
#define FORMAT_TAG "$NETLMv2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD4 HMAC-MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125 /* lmcons.h - PWLEN (256) ? 127 ? */
#define USERNAME_LENGTH 60 /* lmcons.h - UNLEN (256) / LM20_UNLEN (20) */
#define DOMAIN_LENGTH 45 /* lmcons.h - CNLEN / DNLEN */
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define CHALLENGE_LENGTH 32
#define SALT_SIZE 16 + 1 + 2 * (USERNAME_LENGTH + DOMAIN_LENGTH) + 1
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH 32
#define TOTAL_LENGTH 12 + USERNAME_LENGTH + DOMAIN_LENGTH + CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
// these may be altered in init() if running OMP
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#ifndef OMP_SCALE
#define OMP_SCALE 1536
#endif
/* Self-test vectors: either a full "$NETLMv2$..." ciphertext + password,
   or a password with the raw fields {login, uid, domain, srv challenge,
   LMv2 response, client challenge} for prepare() to assemble. */
static struct fmt_tests tests[] = {
  {"", "1337adminPASS", {"FOODOM\\Administrator", "", "", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} },
  {"$NETLMv2$ADMINISTRATORFOODOM$1122334455667788$6F64C5C1E35F68DD80388C0F00F34406$F0F3FF27037AA69F", "1337adminPASS"},
  {"$NETLMv2$USER1$1122334455667788$B1D163EA5881504F3963DC50FCDC26C1$EB4D9E8138149E20", "foobar"},
  // repeat in exactly the same format that is used in john.pot (lower case hex)
  {"$NETLMv2$USER1$1122334455667788$b1d163ea5881504f3963dc50fcdc26c1$eb4d9e8138149e20", "foobar"},
  {"$NETLMv2$ATEST$1122334455667788$83B59F1536D3321DBF1FAEC14ADB1675$A1E7281FE8C10E53", "SomeFancyP4$$w0rdHere"},
  {"", "1337adminPASS", {"administrator", "", "FOODOM", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} },
  {"", "foobar", {"user1", "", "", "1122334455667788", "B1D163EA5881504F3963DC50FCDC26C1", "EB4D9E8138149E20"} },
  {"", "SomeFancyP4$$w0rdHere", {"aTest", "", "", "1122334455667788", "83B59F1536D3321DBF1FAEC14ADB1675", "A1E7281FE8C10E53"} },
  {NULL}
};
static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1]; // candidate passwords, one per key slot
static int (*saved_len);                           // strlen of each candidate
static uchar (*output)[BINARY_SIZE];               // computed LMv2 responses (per key)
static HMACMD5Context (*saved_ctx);                // per-key HMAC-MD5 key schedule of the NTLM hash
static int keys_prepared;                          // 0 after set_key(); crypt_all rebuilds saved_ctx
static unsigned char *challenge;                   // current salt blob (see get_salt for layout)
/* Allocate per-key buffers; under OpenMP, scale key counts so each
   thread gets a batch of OMP_SCALE keys per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	output = mem_calloc(self->params.max_keys_per_crypt, sizeof(*output));
	saved_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_ctx));
}
/* Release everything init() allocated (reverse order of allocation). */
static void done(void)
{
	MEM_FREE(saved_ctx);
	MEM_FREE(output);
	MEM_FREE(saved_len);
	MEM_FREE(saved_plain);
}
/*
 * Validate a "$NETLMv2$IDENTITY$SRVCHAL$RESPONSE$CLICHAL" ciphertext:
 * identity is printable and bounded, the three trailing fields are hex
 * of exact lengths 16, 32 and 16, and nothing follows the last field.
 * Returns 1 if the string is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	if (ciphertext == NULL)
		return 0;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;

	/* Identity (USERDOMAIN): printable chars up to the next '$' */
	p = ciphertext + FORMAT_TAG_LEN;
	q = p;
	while (*q != '$') {
		if ((unsigned char)*q < 0x20)   /* also rejects a missing '$' at NUL */
			return 0;
		q++;
	}
	if (!*q || q - p > USERNAME_LENGTH + DOMAIN_LENGTH)
		return 0;

	/* Server challenge: exactly 16 hex digits */
	p = ++q;
	while (*q != '$') {
		if (atoi16[ARCH_INDEX(*q)] == 0x7F)
			return 0;
		q++;
	}
	if (!*q || q - p != CHALLENGE_LENGTH / 2)
		return 0;

	/* LMv2 response: exactly 32 hex digits */
	p = ++q;
	while (*q != '$') {
		if (atoi16[ARCH_INDEX(*q)] == 0x7F)
			return 0;
		q++;
	}
	if (!*q || q - p != CIPHERTEXT_LENGTH)
		return 0;

	/* Client challenge: a trailing run of exactly 16 hex digits */
	p = ++q;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	if (q - p != CHALLENGE_LENGTH / 2)
		return 0;
	if (*q != '\0')
		return 0;

	return 1;
}
/*
 * Build a canonical "$NETLMv2$IDENTITY$..." ciphertext from the raw
 * pot-file fields: [0]=login (possibly "DOMAIN\\user"), [1]=existing
 * ciphertext, [2]=uid/domain, [3]=server challenge, [4]=LMv2 response,
 * [5]=client challenge.  Returns the assembled string (str_alloc_copy'd)
 * if it validates, otherwise the original split_fields[1].
 */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *srv_challenge = split_fields[3];
	char *nethashv2     = split_fields[4];
	char *cli_challenge = split_fields[5];
	char *login = split_fields[0];
	char *uid = split_fields[2];
	char *identity = NULL, *tmp;

	/* already in canonical form? */
	if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN))
		return split_fields[1];

	if (!split_fields[0]||!split_fields[2]||!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];

	/* DOMAIN\USER: -or- USER::DOMAIN: */
	if ((tmp = strstr(login, "\\")) != NULL) {
		identity = (char *) mem_alloc(strlen(login)*2 + 1);
		strcpy(identity, tmp + 1);

		/* Upper-Case Username - Not Domain */
		enc_strupper(identity);

		/* append the domain part (chars before the backslash) */
		strncat(identity, login, tmp - login);
	}
	else {
		identity = (char *) mem_alloc(strlen(login)*2 + strlen(uid) + 1);
		strcpy(identity, login);
		enc_strupper(identity);
		strcat(identity, uid);
	}
	tmp = (char *) mem_alloc(9 + strlen(identity) + 1 + strlen(srv_challenge) + 1 + strlen(nethashv2) + 1 + strlen(cli_challenge) + 1);
	sprintf(tmp, "%s%s$%s$%s$%s", FORMAT_TAG, identity, srv_challenge, nethashv2, cli_challenge);
	MEM_FREE(identity);

	if (valid(tmp, self)) {
		char *cp = str_alloc_copy(tmp);
		MEM_FREE(tmp);
		return cp;
	}
	MEM_FREE(tmp);
	return split_fields[1];
}
/*
 * Canonicalize a valid ciphertext: lower-case everything after the
 * "$NETLMv2$IDENTITY$" prefix (identity case is preserved) so pot-file
 * entries compare consistently.  Returns a pointer to a static buffer.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];
	char *pos = NULL;
	int identity_length = 0;
	size_t len;

	/* Calculate identity length */
	for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++);
	identity_length = pos - (ciphertext + FORMAT_TAG_LEN);

	memset(out, 0, sizeof(out));
	/* Bound the copy: valid() limits each field, but never trust the
	   input length when writing into a fixed-size buffer (the original
	   memcpy used strlen(ciphertext) unchecked). */
	len = strlen(ciphertext);
	if (len > TOTAL_LENGTH)
		len = TOTAL_LENGTH;
	memcpy(out, ciphertext, len);
	strlwr(&out[FORMAT_TAG_LEN + identity_length + 1]); /* Exclude: $NETLMv2$USERDOMAIN$ */
	return out;
}
static void *get_binary(char *ciphertext)
{
static uchar *binary;
char *pos = NULL;
int i, identity_length;
if (!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++);
identity_length = pos - (ciphertext + FORMAT_TAG_LEN);
ciphertext += FORMAT_TAG_LEN + identity_length + 1 + CHALLENGE_LENGTH / 2 + 1;
for (i=0; i<BINARY_SIZE; i++)
{
binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
}
return binary;
}
/* Calculate the LMv2 response for the given challenge, using the
   specified authentication identity (username and domain), password
   and client nonce.
*/
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for(i = 0; i < count; i++)
#endif
	{
		unsigned char ntlm_v2_hash[16];
		HMACMD5Context ctx; // can't be moved above the OMP pragma

		/* Key setup is salt-independent, so do it once per set_key()
		   batch and cache the context in saved_ctx[]. */
		if (!keys_prepared) {
			int len;
			unsigned char ntlm[16];

			/* Generate 16-byte NTLM hash */
			len = E_md4hash(saved_plain[i], saved_len[i], ntlm);

			// We do key setup of the next HMAC_MD5 here (once per salt)
			hmac_md5_init_K16(ntlm, &saved_ctx[i]);

			/* E_md4hash returns -tlen on truncation; cut the stored
			   plaintext at the same point so get_key() matches */
			if (len <= 0)
				saved_plain[i][-len] = 0; // match truncation
		}

		/* HMAC-MD5(Username + Domain, NTLM Hash) --
		   identity length is at challenge[16], UTF-16 bytes follow */
		memcpy(&ctx, &saved_ctx[i], sizeof(ctx));
		hmac_md5_update(&challenge[17], (int)challenge[16], &ctx);
		hmac_md5_final(ntlm_v2_hash, &ctx);

		/* Generate 16-byte non-client nonce portion of LMv2 Response */
		/* HMAC-MD5(Challenge + Nonce, NTLMv2 Hash) + Nonce */
		hmac_md5(ntlm_v2_hash, challenge, 16, (unsigned char*)output[i]);
	}
	keys_prepared = 1;
	return count;
}
/* Return 1 if any computed response in the batch matches binary. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(output[i], binary, BINARY_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Return 1 if the response at `index` matches binary exactly. */
static int cmp_one(void *binary, int index)
{
	return memcmp(output[index], binary, BINARY_SIZE) == 0;
}
/* Full check against the original ciphertext string. */
static int cmp_exact(char *source, int index)
{
	return memcmp(output[index], get_binary(source), BINARY_SIZE) == 0;
}
/* We're essentially using three salts, but we're going to pack it into a single blob for now.
   Layout of the returned blob (SALT_SIZE bytes, zero-padded):
     bytes  0..7   server challenge (binary)
     bytes  8..15  client challenge (binary)
     byte   16     length in bytes of the UTF-16 identity that follows
     bytes 17..    Unicode(Username (<=20).Domain (<=15))
*/
static void *get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;
	unsigned char identity[USERNAME_LENGTH + DOMAIN_LENGTH + 1];
	UTF16 identity_ucs2[USERNAME_LENGTH + DOMAIN_LENGTH + 1];
	int i, identity_length;
	int identity_ucs2_length;
	char *pos = NULL;

	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	memset(binary_salt, 0, SALT_SIZE);

	/* Calculate identity length */
	for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++);
	identity_length = pos - (ciphertext + FORMAT_TAG_LEN);

	/* Convert identity (username + domain) string to NT unicode */
	strnzcpy((char *)identity, ciphertext + FORMAT_TAG_LEN, sizeof(identity));
	identity_ucs2_length = enc_to_utf16((UTF16 *)identity_ucs2, USERNAME_LENGTH + DOMAIN_LENGTH, (UTF8 *)identity, identity_length) * sizeof(int16_t);

	if (identity_ucs2_length < 0) // Truncated at Unicode conversion.
		identity_ucs2_length = strlen16((UTF16 *)identity_ucs2) * sizeof(int16_t);

	binary_salt[16] = (unsigned char)identity_ucs2_length;
	memcpy(&binary_salt[17], (char *)identity_ucs2, identity_ucs2_length);

	/* Set server challenge (16 hex digits -> 8 bytes) */
	ciphertext += FORMAT_TAG_LEN + identity_length + 1;
	for (i = 0; i < 8; i++)
		binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];

	/* Set client challenge (skip remaining server hex + '$' + response + '$') */
	ciphertext += 2 + CHALLENGE_LENGTH / 2 + CIPHERTEXT_LENGTH;
	for (i = 0; i < 8; ++i)
		binary_salt[i + 8] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];

	/* Return a concatenation of the server and client challenges and the identity value */
	return (void*)binary_salt;
}
/* Point the crypt loop at the current salt blob (see get_salt layout). */
static void set_salt(void *salt)
{
	challenge = salt;
}
/* Store a candidate password; hashing is deferred to crypt_all(). */
static void set_key(char *key, int index)
{
	const int len = strlen(key);

	saved_len[index] = len;
	memcpy(saved_plain[index], key, len + 1);   /* include the NUL */
	keys_prepared = 0;
}
/* Return the stored candidate (possibly truncated by crypt_all to
   match E_md4hash's truncation). */
static char *get_key(int index)
{
	return (char *)saved_plain[index];
}
static int salt_hash(void *salt)
{
	/*
	 * Hash the client challenge (bytes 8..15 of the salt blob), not the
	 * server challenge (bytes 0..7), since the server challenge may be
	 * spoofed to a fixed value.  The previous expression
	 * (*(ARCH_WORD_32 *)salt + 8) was an operator-precedence bug: it
	 * dereferenced the server-challenge word and then added 8 to its
	 * VALUE.  Index word 2 to reach byte offset 8.
	 */
	return ((ARCH_WORD_32 *)salt)[2] & (SALT_HASH_SIZE - 1);
}
/* Partial-hash accessors: successively wider slices of the first 32
   bits of the computed response, used by the cracker's hash tables. */
static int get_hash_0(int index)
{
	return *(ARCH_WORD_32 *)output[index] & PH_MASK_0;
}
static int get_hash_1(int index)
{
	return *(ARCH_WORD_32 *)output[index] & PH_MASK_1;
}
static int get_hash_2(int index)
{
	return *(ARCH_WORD_32 *)output[index] & PH_MASK_2;
}
static int get_hash_3(int index)
{
	return *(ARCH_WORD_32 *)output[index] & PH_MASK_3;
}
static int get_hash_4(int index)
{
	return *(ARCH_WORD_32 *)output[index] & PH_MASK_4;
}
static int get_hash_5(int index)
{
	return *(ARCH_WORD_32 *)output[index] & PH_MASK_5;
}
static int get_hash_6(int index)
{
	return *(ARCH_WORD_32 *)output[index] & PH_MASK_6;
}
/* Format registration: parameters block, then the method table. */
struct fmt_main fmt_NETLMv2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unop__identity_uint64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_fp64)
// op(A') function: GB (_unop_tran__identity_uint64_fp64)
// C type: uint64_t
// A type: double
// cast: uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop: cij = aij
// A (input) type for this operator
#define GB_ATYPE \
double

// C (output) type for this operator
#define GB_CTYPE \
uint64_t

// aij = Ax [pA]: fetch one input entry
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]

// Cx [p]: the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
z = x ;

// casting: double input to uint64_t output, with saturation handled
// by GB_cast_to_uint64_t
#define GB_CAST(z, aij) \
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;

// cij = op (cast (aij)): fetch, cast, apply, and store one entry
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_fp64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries with Ab [p] == 1 are present; skip the rest.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // Cx [p] = (uint64_t) Ax [p]
                Cx [p] = GB_cast_to_uint64_t ((double) (Ax [p])) ;
            }
        }
    }
    else
    {
        // sparse, hypersparse, or full: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Cx [p] = (uint64_t) Ax [p]
            Cx [p] = GB_cast_to_uint64_t ((double) (Ax [p])) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose kernel is shared code; the macros
// defined above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...) specialize the
// included template for this type pair.
GrB_Info GB (_unop_tran__identity_uint64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per thread, or shared
    const int64_t *restrict A_slice,    // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the shared transpose template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_concat_sparse.c | //------------------------------------------------------------------------------
// GB_concat_sparse: concatenate an array of matrices into a sparse matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// free all workspace: any temporary typecast/converted tiles held in S,
// the S array itself, the per-tile entry-count workspace, and the
// ek-slicing workspace
#define GB_FREE_WORK                            \
    if (S != NULL)                              \
    {                                           \
        for (int64_t k = 0 ; k < m * n ; k++)   \
        {                                       \
            GB_Matrix_free (&(S [k])) ;         \
        }                                       \
    }                                           \
    GB_FREE_WERK (&S, S_size) ;                 \
    GB_FREE_WERK (&Work, Work_size) ;           \
    GB_WERK_POP (A_ek_slicing, int64_t) ;

// on error, free the workspace and the output matrix C as well
#define GB_FREE_ALL         \
    GB_FREE_WORK ;          \
    GB_phbix_free (C) ;
#include "GB_concat.h"
// GB_concat_sparse: concatenate a 2D array of tiles into C, held as a
// sparse matrix.  Two passes over the tiles: pass 1 counts the entries in
// each vector of each tile (converting any bitmap/transposed tiles first);
// pass 2 copies the entries into place using the cumulative offsets
// computed in the Work array.
GrB_Info GB_concat_sparse           // concatenate into a sparse matrix
(
    GrB_Matrix C,                   // input/output matrix for results
    const int64_t cnz,              // # of entries in C
    const GrB_Matrix *Tiles,        // 2D row-major array of size m-by-n,
    const GrB_Index m,
    const GrB_Index n,
    const int64_t *restrict Tile_rows,  // size m+1
    const int64_t *restrict Tile_cols,  // size n+1
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // allocate C as a sparse matrix
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = NULL ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    int64_t *Work = NULL ;
    size_t Work_size = 0 ;
    GrB_Matrix *S = NULL ;          // holds temporary converted tiles
    size_t S_size = 0 ;

    // save C's properties before GB_phbix_free wipes its content
    GrB_Type ctype = C->type ;
    int64_t cvlen = C->vlen ;
    int64_t cvdim = C->vdim ;
    bool csc = C->is_csc ;
    size_t csize = ctype->size ;
    GB_Type_code ccode = ctype->code ;
    float hyper_switch = C->hyper_switch ;
    float bitmap_switch = C->bitmap_switch ;
    int sparsity_control = C->sparsity ;
    bool static_header = C->static_header ;
    GB_phbix_free (C) ;
    GB_OK (GB_new_bix (&C, static_header, // prior static or dynamic header
        ctype, cvlen, cvdim, GB_Ap_malloc, csc, GxB_SPARSE, false,
        hyper_switch, cvdim, cnz, true, Context)) ;
    C->bitmap_switch = bitmap_switch ;
    C->sparsity = sparsity_control ;
    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ci = C->i ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // "outer" iterates over C's vector dimension, "inner" over its index
    // dimension; which of m/n is which depends on the CSC/CSR orientation.
    int64_t nouter = csc ? n : m ;
    int64_t ninner = csc ? m : n ;
    // Work [inner*cvdim + j]: entry count (later: offset) for vector j of
    // the tiles in inner row/column "inner"
    Work = GB_CALLOC_WERK (ninner * cvdim, int64_t, &Work_size) ;
    S = GB_CALLOC_WERK (m * n, GrB_Matrix, &S_size) ;
    if (S == NULL || Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // count entries in each vector of each tile
    //--------------------------------------------------------------------------

    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // get the tile A; transpose and typecast, if needed
            //------------------------------------------------------------------

            A = csc ? GB_TILE (Tiles, inner, outer)
                    : GB_TILE (Tiles, outer, inner) ;
            GrB_Matrix T = NULL ;
            if (csc != A->is_csc)
            {
                // T = (ctype) A', not in-place, using a dynamic header
                GB_OK (GB_transpose (&T, ctype, csc, A,
                    NULL, NULL, NULL, false, Context)) ;
                // save T in array S so it is freed by GB_FREE_WORK later
                if (csc)
                {
                    GB_TILE (S, inner, outer) = T ;
                }
                else
                {
                    GB_TILE (S, outer, inner) = T ;
                }
                A = T ;
                GB_MATRIX_WAIT (A) ;
            }
            ASSERT (C->is_csc == A->is_csc) ;
            ASSERT (!GB_ANY_PENDING_WORK (A)) ;

            //------------------------------------------------------------------
            // ensure the tile is not bitmap
            //------------------------------------------------------------------

            if (GB_IS_BITMAP (A))
            {
                if (T == NULL)
                {
                    // copy A into T (the original tile must not be modified)
                    GB_OK (GB_dup2 (&T, A, true, NULL, Context)) ;
                    // save T in array S
                    if (csc)
                    {
                        GB_TILE (S, inner, outer) = T ;
                    }
                    else
                    {
                        GB_TILE (S, outer, inner) = T ;
                    }
                }
                // convert T from bitmap to sparse
                GB_OK (GB_convert_bitmap_to_sparse (T, Context)) ;
                A = T ;
            }
            ASSERT (!GB_IS_BITMAP (A)) ;

            //------------------------------------------------------------------
            // log the # of entries in each vector of the tile A
            //------------------------------------------------------------------

            const int64_t anvec = A->nvec ;
            const int64_t avlen = A->vlen ;
            // cvstart: first vector of C that this tile occupies
            int64_t cvstart = csc ? Tile_cols [outer] : Tile_rows [outer] ;
            int64_t *restrict W = Work + inner * cvdim + cvstart ;
            int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
            if (GB_IS_FULL (A))
            {
                // A is full
                int64_t j ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (j = 0 ; j < anvec ; j++)
                {
                    // W [j] = # of entries in A(:,j), which is just avlen
                    W [j] = avlen ;
                }
            }
            else
            {
                // A is sparse or hyper
                int64_t k ;
                int64_t *restrict Ah = A->h ;
                int64_t *restrict Ap = A->p ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (k = 0 ; k < anvec ; k++)
                {
                    // W [j] = # of entries in A(:,j), the kth column of A
                    int64_t j = GBH (Ah, k) ;
                    W [j] = Ap [k+1] - Ap [k] ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // cumulative sum of entries in each tile
    //--------------------------------------------------------------------------

    int nth = GB_nthreads (ninner*cvdim, chunk, nthreads_max) ;
    int64_t k ;
    // first pass: within each vector k of C, convert per-tile counts into
    // offsets relative to the start of that vector, and total them in Cp
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (k = 0 ; k < cvdim ; k++)
    {
        int64_t s = 0 ;
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {
            int64_t p = inner * cvdim + k ;
            int64_t c = Work [p] ;
            Work [p] = s ;
            s += c ;
        }
        // total number of entries in C(:,k)
        Cp [k] = s ;
    }
    GB_cumsum (Cp, cvdim, &(C->nvec_nonempty), nthreads_max, Context) ;
    // second pass: make each tile's offset absolute by adding the start of
    // its vector in C
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (k = 0 ; k < cvdim ; k++)
    {
        int64_t pC = Cp [k] ;
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {
            int64_t p = inner * cvdim + k ;
            Work [p] += pC ;
        }
    }

    //--------------------------------------------------------------------------
    // concatenate all matrices into C
    //--------------------------------------------------------------------------

    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // get the tile A, either the temporary matrix T or the original A
            //------------------------------------------------------------------

            A = csc ? GB_TILE (S, inner, outer)
                    : GB_TILE (S, outer, inner) ;
            if (A == NULL)
            {
                A = csc ? GB_TILE (Tiles, inner, outer)
                        : GB_TILE (Tiles, outer, inner) ;
            }
            ASSERT (!GB_IS_BITMAP (A)) ;
            ASSERT (C->is_csc == A->is_csc) ;
            ASSERT (!GB_ANY_PENDING_WORK (A)) ;
            GB_Type_code acode = A->type->code ;

            //------------------------------------------------------------------
            // determine where to place the tile in C
            //------------------------------------------------------------------

            // The tile A appears in vectors cvstart:cvend-1 of C, and indices
            // cistart:ciend-1.
            int64_t cvstart, cvend, cistart, ciend ;
            if (csc)
            {
                // C and A are held by column
                // Tiles is row-major and accessed in column order
                cvstart = Tile_cols [outer] ;
                cvend   = Tile_cols [outer+1] ;
                cistart = Tile_rows [inner] ;
                ciend   = Tile_rows [inner+1] ;
            }
            else
            {
                // C and A are held by row
                // Tiles is row-major and accessed in row order
                cvstart = Tile_rows [outer] ;
                cvend   = Tile_rows [outer+1] ;
                cistart = Tile_cols [inner] ;
                ciend   = Tile_cols [inner+1] ;
            }

            // get the workspace pointer array W for this tile
            int64_t *restrict W = Work + inner * cvdim + cvstart ;

            //------------------------------------------------------------------
            // slice the tile
            //------------------------------------------------------------------

            int64_t avdim = cvend - cvstart ;
            int64_t avlen = ciend - cistart ;
            ASSERT (avdim == A->vdim) ;
            ASSERT (avlen == A->vlen) ;
            int A_nthreads, A_ntasks ;
            const int64_t *restrict Ap = A->p ;
            const int64_t *restrict Ah = A->h ;
            const int64_t *restrict Ai = A->i ;
            GB_SLICE_MATRIX (A, 1, chunk) ;

            //------------------------------------------------------------------
            // copy the tile A into C
            //------------------------------------------------------------------

            // "done" is set by the included template on success
            bool done = false ;

            #ifndef GBCOMPACT
            if (ccode == acode)
            {
                // no typecasting needed: copy raw values by size
                switch (csize)
                {
                    #define GB_COPY(pC,pA) Cx [pC] = Ax [pA]

                    case 1 : // uint8, int8, bool, or 1-byte user-defined
                        #define GB_CTYPE uint8_t
                        #include "GB_concat_sparse_template.c"
                        break ;

                    case 2 : // uint16, int16, or 2-byte user-defined
                        #define GB_CTYPE uint16_t
                        #include "GB_concat_sparse_template.c"
                        break ;

                    case 4 : // uint32, int32, float, or 4-byte user-defined
                        #define GB_CTYPE uint32_t
                        #include "GB_concat_sparse_template.c"
                        break ;

                    case 8 : // uint64, int64, double, float complex,
                             // or 8-byte user defined
                        #define GB_CTYPE uint64_t
                        #include "GB_concat_sparse_template.c"
                        break ;

                    case 16 : // double complex or 16-byte user-defined
                        // copied as two 8-byte words per entry
                        #define GB_CTYPE uint64_t
                        #undef  GB_COPY
                        #define GB_COPY(pC,pA)                      \
                            Cx [2*pC  ] = Ax [2*pA  ] ;             \
                            Cx [2*pC+1] = Ax [2*pA+1] ;
                        #include "GB_concat_sparse_template.c"
                        break ;

                    default:;
                }
            }
            #endif

            if (!done)
            {
                // with typecasting or user-defined types: copy entry-by-entry
                // through the cast function, using byte offsets
                GB_cast_function cast_A_to_C = GB_cast_factory (ccode, acode) ;
                size_t asize = A->type->size ;
                #define GB_CTYPE GB_void
                #undef  GB_COPY
                #define GB_COPY(pC,pA)                                  \
                    cast_A_to_C (Cx + (pC)*csize, Ax + (pA)*asize, asize) ;
                #include "GB_concat_sparse_template.c"
            }
            GB_WERK_POP (A_ek_slicing, int64_t) ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    C->magic = GB_MAGIC ;
    return (GrB_SUCCESS) ;
}
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image-private.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
/*
Typedef declaration.
*/
/*
  A matrix cache: a columns-by-rows grid of fixed-size elements, stored in
  heap memory, an anonymous memory map, or a disk file, depending on which
  resource limits could be satisfied at acquisition time.
*/
struct _MatrixInfo
{
  CacheType
    type;                 /* MemoryCache, MapCache, or DiskCache */

  size_t
    columns,              /* matrix width, in elements */
    rows,                 /* matrix height, in elements */
    stride;               /* size of one element, in bytes */

  MagickSizeType
    length;               /* total size: columns*rows*stride bytes */

  MagickBooleanType
    mapped,               /* MemoryCache: elements came from MapBlob() */
    synchronize;          /* honor MAGICK_SYNCHRONIZE environment option */

  char
    path[MaxTextExtent];  /* DiskCache/MapCache: backing file path */

  int
    file;                 /* backing file descriptor, or -1 if none */

  void
    *elements;            /* element storage; NULL for pure DiskCache */

  SemaphoreInfo
    *semaphore;           /* serializes lseek+read/write file access */

  size_t
    signature;            /* validity marker, set to MagickSignature */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/*
  SIGBUS handler installed by SetMatrixExtent(): a memory-mapped matrix
  cache raises SIGBUS if the backing file cannot be extended (e.g. the
  disk fills); convert that into a fatal cache exception.
*/
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Write `length` bytes from `buffer` to the matrix backing file starting at
  byte `offset`.  Short writes and EINTR are retried; returns the number of
  bytes written (which is < length on a hard error), or -1 if the initial
  seek fails.  Without pwrite() the lseek+write pair is serialized with the
  matrix semaphore so concurrent callers cannot interleave seeks.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* each write is capped at SSIZE_MAX to stay within ssize_t */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* retry on EINTR; any other error ends the loop short */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Grow the matrix backing file to at least `length` bytes by writing a
  single byte at length-1 (creating a sparse file), optionally asking the
  OS to really allocate the blocks when synchronize is set.  Also installs
  the SIGBUS handler so a later failure to fault in mapped pages is
  reported as a cache error.  Returns MagickTrue on success.
*/
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not fit in a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);  /* already large enough */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  Allocate a MatrixInfo cache of columns x rows elements of `stride` bytes
  each, trying heap memory, then an anonymous memory map, then a disk file
  (optionally memory-mapped), as resource limits permit.  Returns NULL (or
  records a CacheError in `exception`) on failure.

  Fix: the overflow check divided by `rows` and `stride` without first
  verifying they are nonzero, so AcquireMatrixInfo(c, 0, 0, e) performed an
  integer division by zero (SIGFPE).  Zero dimensions are now rejected
  before the division.
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) ResetMagickMemory(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AllocateSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  /*
    Guard against zero dimensions (division by zero below) and against
    columns*rows*stride overflowing MagickSizeType.
  */
  if ((rows == 0) || (stride == 0) ||
      (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          /* try heap first, then an anonymous memory map */
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      /* fall back to a disk cache, memory-mapped if permitted */
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      (void) AcquireMagickResource(MemoryResource,matrix_info->length);
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            {
              matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
                (size_t) matrix_info->length);
              if (matrix_info->elements != NULL)
                matrix_info->type=MapCache;
              else
                RelinquishMagickResource(MapResource,matrix_info->length);
            }
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two-dimensional matrix and vectors required
% for the GaussJordanElimination() method below, solving a system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
/*
  Allocate a number_rows x size matrix as an array of row pointers, with
  every element initialized to 0.0.  Returns NULL if any allocation fails,
  releasing everything allocated so far.  Free with RelinquishMagickMatrix.
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **matrix;

  register ssize_t
    row;

  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (row=0; row < (ssize_t) number_rows; row++)
  {
    register ssize_t
      col;

    matrix[row]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[row]));
    if (matrix[row] == (double *) NULL)
      {
        /* unwind: release rows already allocated, then the pointer array */
        for (col=row-1; col >= 0; col--)
          matrix[col]=(double *) RelinquishMagickMemory(matrix[col]);
        matrix=(double **) RelinquishMagickMemory(matrix);
        return((double **) NULL);
      }
    for (col=0; col < (ssize_t) size; col++)
      matrix[row][col]=0.0;
  }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Release all storage associated with a matrix cache and the MatrixInfo
  structure itself; always returns NULL for assignment convenience.
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
    }
    /* fallthrough: a MapCache is backed by a disk file, so the DiskCache
       teardown (close + remove file, release disk resource) runs too */
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  DestroySemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augumented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix argumenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns). Also represents
% the number terms that need to be solved.
%
% o number_vectors: Number of vectors columns, argumenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially is only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles. when only one set of simultanious equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifing more 'columns' (as an 'array of vector columns', you
% can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficents, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordients, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  Reduce `matrix` (rank x rank, array of row pointers) to reduced row
  echelon form with full pivoting, applying the same operations to the
  `number_vectors` column vectors in `vectors`, thereby solving the
  augmented system.  Returns MagickFalse if workspace cannot be allocated
  or the matrix is singular.

  Fixes:
    - the two early MagickFalse returns (degenerate pivot, singularity)
      leaked the columns/rows/pivots workspace; they now release it.
    - GaussJordanSwap used the add/subtract trick, which can overflow to
      infinity and lose precision for large magnitudes; it now swaps via a
      temporary, which is exact for all values.
*/
MagickExport MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
  double \
    gauss_swap_temp; \
\
  gauss_swap_temp=(x); \
  (x)=(y); \
  (y)=gauss_swap_temp; \
}

  double
    max,
    scale;

  ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) ResetMagickMemory(columns,0,rank*sizeof(*columns));
  (void) ResetMagickMemory(rows,0,rank*sizeof(*rows));
  (void) ResetMagickMemory(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Select the largest remaining element (full pivoting) among rows and
      columns that have not yet hosted a pivot.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    /* degenerate pivot bookkeeping: free workspace */
                    pivots=(ssize_t *) RelinquishMagickMemory(pivots);
                    rows=(ssize_t *) RelinquishMagickMemory(rows);
                    columns=(ssize_t *) RelinquishMagickMemory(columns);
                    return(MagickFalse);
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /* move the pivot onto the diagonal */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /* singular matrix: free workspace before failing */
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
        rows=(ssize_t *) RelinquishMagickMemory(rows);
        columns=(ssize_t *) RelinquishMagickMemory(columns);
        return(MagickFalse);
      }
    /*
      Normalize the pivot row, then eliminate the pivot column from every
      other row (and from the augmented vectors).
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /* undo the column permutations introduced by full pivoting */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Accessor: the matrix width in elements, as set at acquisition.
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/* Clamp a column offset to the valid range [0, columns-1]. */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  return(x >= (ssize_t) columns ? (ssize_t) (columns-1) : x);
}
/* Clamp a row offset to the valid range [0, rows-1]. */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  return(y >= (ssize_t) rows ? (ssize_t) (rows-1) : y);
}
/*
  Read `length` bytes into `buffer` from the matrix backing file starting
  at byte `offset`.  Mirrors WriteMatrixElements(): short reads and EINTR
  are retried, the byte count actually read is returned (-1 if the initial
  seek fails), and the non-pread path serializes lseek+read with the
  matrix semaphore.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* each read is capped at SSIZE_MAX to stay within ssize_t */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* retry on EINTR; any other error (or EOF) ends the loop short */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Copy one element (stride bytes) at (x,y) into `value`.  Out-of-range
  offsets are clamped to the nearest edge via EdgeX/EdgeY rather than
  rejected.  Memory and mapped caches are served by memcpy; a disk cache
  reads from the backing file.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  /* linear element index of the clamped (x,y) position */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Accessor: the matrix height in elements, as set at acquisition.
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficent
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficents.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
/*
  Accumulate one observation into a least-squares system: add the outer
  product terms[i]*terms[j] into the rank x rank normal matrix, and
  results[i]*terms[j] into each of the number_vectors result columns.
*/
MagickExport void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  register ssize_t
    u,
    v;

  for (v=0; v < (ssize_t) rank; v++)
  {
    const double
      term=terms[v];

    for (u=0; u < (ssize_t) rank; u++)
      matrix[u][v]+=terms[u]*term;
    for (u=0; u < (ssize_t) number_vectors; u++)
      vectors[u][v]+=results[u]*term;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor,
    value;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Only matrices whose elements are at least double-sized can be rendered.
  */
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&value);
  min_value=value;
  max_value=value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /*
    Map [min_value,max_value] onto [0,QuantumRange]; a constant non-zero
    matrix maps every element to QuantumRange.
  */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to a grayscale image, one pixel per element.
  */
  image=AcquireImage((ImageInfo *) NULL);
  /* NOTE(review): image is dereferenced without a NULL check below --
     confirm AcquireImage cannot return NULL here. */
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    register PixelPacket
      *q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* elements that cannot be read keep whatever pixel value was queued */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      value=scale_factor*(value-min_value);
      q->red=ClampToQuantum(value);
      q->green=q->red;
      q->blue=q->red;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  register ssize_t
    x;

  ssize_t
    count,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  if (matrix_info->type != DiskCache)
    {
      /*
        Memory-resident matrix: zero the whole element buffer at once.
      */
      (void) ResetMagickMemory(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk-backed matrix: overwrite the cache file with zero bytes, one byte
    per write().  NOTE(review): the inner loop runs to matrix_info->length
    for every row, so rows*length bytes are written in total -- confirm
    that length is the per-row byte count here.
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < (ssize_t) matrix_info->length; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    if (x < (ssize_t) matrix_info->length)
      break;
  }
  /* MagickFalse if a short write aborted the zeroing early */
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  register ssize_t
    r;

  /*
    Free each row, then the row-pointer array itself; returns NULL.
  */
  if (matrix == (double **) NULL )
    return(matrix);
  for (r=0; r < (ssize_t) number_rows; r++)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  /*
    Compute the linear element index and reject out-of-range offsets.
  */
  i=(MagickOffsetType) y*matrix_info->columns+x;
  if ((i < 0) ||
      ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      /*
        Memory-resident matrix: copy the element bytes directly.
      */
      (void) memcpy((unsigned char *) matrix_info->elements+i*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  /*
    Disk-backed matrix: write the element through the file cache;
    fail unless the full stride was written.
  */
  count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
|
GB_binop__lt_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int64)
// A*D function (colscale): GB (_AxD__lt_int64)
// D*A function (rowscale): GB (_DxB__lt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int64)
// C=scalar+B GB (_bind1st__lt_int64)
// C=scalar+B' GB (_bind1st_tran__lt_int64)
// C=A+scalar GB (_bind2nd__lt_int64)
// C=A'+scalar GB (_bind2nd_tran__lt_int64)
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT64 || GxB_NO_LT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled (#if 0): LT is a comparator, so no dense C += A+B kernel is
// generated for this operator.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C = A+B where all three matrices are dense; the loop body comes from
    // the template, specialized here to bool cij = (int64 aij < int64 bij).
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The accumulation body is compiled out (#if 0) for this operator, so
    // this function is a no-op that reports success.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The scalar-accumulation body is compiled out (#if 0) for this
    // operator, so this function is a no-op that reports success.
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C = A*D, scaling each column j of A by the diagonal entry D(j,j);
    // the template writes bool results into Cx.
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C = D*B, scaling each row i of B by the diagonal entry D(i,i);
    // the template writes bool results into Cx.
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseAdd C = A+B (optionally masked).  The ek_slicing workspaces are
    // allocated inside the template and released by GB_FREE_WORK below.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult C = A.*B (optionally masked), general sparsity case;
    // the numerical work is supplied by the template.
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult where A is sparse/hyper and B is bitmap/full.
    // GB_BINOP_FLIP is 0 for this operator, so only the unflipped branch
    // below is compiled in.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult C<M> = A.*B with M sparse/hyper and A, B bitmap/full.
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult where C is held as a bitmap, with optional (complemented)
    // mask M.
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (x < Bx [k]) for every entry present per the bitmap Bb.
    bool *Cx = (bool *) Cx_output ;
    const int64_t *Bx = (const int64_t *) Bx_input ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            int64_t bkj = Bx [k] ;
            Cx [k] = (x < bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (Ax [k] < y) for every entry present per the bitmap Ab.
    bool *Cx = (bool *) Cx_output ;
    const int64_t *Ax = (const int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            int64_t akj = Ax [k] ;
            Cx [k] = (akj < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    // C = op (x, A'): transpose A and apply the bound operator through the
    // GB_CAST_OP macro defined above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    // C = op (A', y): transpose A and apply the bound operator through the
    // GB_CAST_OP macro defined above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
graph.h | #ifndef MY_GRAPH_H
#define MY_GRAPH_H
#include "utils.h"
typedef unsigned int intV;
typedef unsigned int intE;
typedef long long int intB;
#define CL_SIZE 1
class Graph
{
    private:
    //per-vertex deletion flag (1 = deleted in a previous round)
    std::vector<uint8_t> deleted;
    //remove duplicate edges; also computes deg[] and numE
    void remove_duplicates();

    public:
    intV numU;      //number of vertices on the U side
    intV numV;      //number of vertices on the V side
    intV numT;      //total number of vertices (numU + numV)
    intE numE;      //number of undirected edges
    std::vector<std::vector<intV>> adj;     //adjacency lists over all numT vertices
    std::vector<std::vector<intE>> eId;     //per-vertex edge ids (populated externally)
    std::vector<intV> deg;                  //current degree of each vertex
    std::vector<intV> uLabels;              //vertex ids belonging to the U side
    std::vector<intV> vLabels;              //vertex ids belonging to the V side

    //initializer lists are written in member declaration order
    //(numU, numV, numT, numE), which is the order members are actually
    //constructed (avoids -Wreorder)
    Graph(): numU(0), numV(0), numT(0), numE(0) {}
    Graph(intV V, intV U, intE E): numU(U), numV(V), numT(U+V), numE(E)
    {
        adj.resize(numT);
        uLabels.resize(numU);
        vLabels.resize(numV);
        deg.resize(numT, 0);
        deleted.resize(numT, 0);
    }
    //get neighbors of a vertex; reports its current degree through degV
    inline std::vector<intV>& get_neigh(intV v, intV &degV);
    //sort adjacency lists on increasing order of index or
    //decreasing order of user-specified priority
    void sort_adj();
    void sort_adj(std::vector<intV> &priority);
    //sort vertices in the degree order
    void sort_deg(std::vector<intV> &opList);
    //reorder the graph and produce a new graph object
    void reorder(std::vector<intV> &newLabels, Graph &outG);
    //reorder the same graph object
    void reorder_in_place(std::vector<intV> &newLabels);
    //create a copy (excludes deleted vertices/edges)
    void copy(Graph &outG);
    //read graph from a file
    void read_graph(std::string &filename, int peelSide);
    void read_graph_bin(std::string &filename, int peelSide);
    //check if vertex is deleted
    inline bool is_deleted(intV v);
    //delete/restore vertices
    inline void delete_vertex(intV v);
    inline void restore_vertex(intV v);
    //delete edges
    inline void delete_edges();
    inline void restore_edges();
    void get_labels(std::vector<intV> &labels, int side);
    //FOR DEBUGGING//
    void dump_graph();
    void print_graph();
    ~Graph() {
        #pragma omp parallel for
        for (intV i=0; i<numT; i++)
            free_vec(adj[i]);
        free_vec(adj);
        free_vec(deg);
        free_vec(uLabels);
        free_vec(vLabels);
        free_vec(eId);
        free_vec(deleted);
    }
};
void Graph::sort_adj()
{
#pragma omp parallel for schedule(dynamic, NUM_THREADS)
for (intV i=0; i<numT; i++)
std::stable_sort(adj[i].begin(), adj[i].end());
}
//Sort each adjacency list by the user-supplied priority values
//(serial per vertex, parallel across vertices).
void Graph::sort_adj(std::vector<intV> &priority)
{
    #pragma omp parallel for schedule(dynamic, NUM_THREADS)
    for (intV i=0; i<numT; i++)
        serial_sort_kv(adj[i], priority);
}
//sorted labels is the list of vertices in a degree sorted order
void Graph::sort_deg(std::vector<intV> &sortedLabels)
{
    //start from the identity permutation, then sort the vertex ids
    //using deg[] as the key
    sortedLabels.resize(numT);
    #pragma omp parallel for
    for (intV i=0; i<numT; i++)
        sortedLabels[i] = i;
    parallel_sort_kv<intV, intV>(sortedLabels, deg);
}
//newLabels is the map from existing to new labels
//eg. newLabels = [1, 2, 0] means vertex 0 is now 1,
//vertex 1 is now 2 and vertex 2 is now 0.
void Graph::reorder(std::vector<intV> &newLabels, Graph &outG)
{
    //copy scalar graph properties and size the output containers
    outG.numU = numU;
    outG.numV = numV;
    outG.numT = numT;
    outG.numE = numE;
    outG.adj.resize(numT);
    outG.uLabels.resize(numU);
    outG.vLabels.resize(numV);
    outG.deg.resize(numT, 0);
    outG.deleted.resize(numT, false);
    //relabel the side-membership lists and restore their sorted order
    #pragma omp parallel for
    for (intV i=0; i<numU; i++)
        outG.uLabels[i] = newLabels[uLabels[i]];
    #pragma omp parallel for
    for (intV i=0; i<numV; i++)
        outG.vLabels[i] = newLabels[vLabels[i]];
    parallel_sort_indices(outG.uLabels, std::less<intV>());
    parallel_sort_indices(outG.vLabels, std::less<intV>());
    //move each vertex's relabeled adjacency list, degree, and deletion
    //flag into its new position
    #pragma omp parallel for schedule(dynamic, NUM_THREADS)
    for (intV i=0; i<numT; i++)
    {
        intV numNeigh = adj[i].size();
        outG.adj[newLabels[i]].resize(numNeigh);
        for (intV j=0; j<numNeigh; j++)
            outG.adj[newLabels[i]][j] = newLabels[adj[i][j]];
        outG.deg[newLabels[i]] = deg[i];
        outG.deleted[newLabels[i]] = deleted[i];
    }
    outG.sort_adj();
}
//Create a compacted copy in outG: deleted vertices keep empty adjacency
//lists and surviving vertices drop edges to deleted neighbors.
void Graph::copy(Graph &outG)
{
    outG.numU = numU;
    outG.numV = numV;
    outG.numT = numT;
    outG.numE = numE;   //provisional; recomputed from surviving degrees below
    outG.adj.resize(numT);
    outG.deg.resize(numT);
    outG.deleted.resize(numT);
    parallel_vec_copy(outG.uLabels, uLabels);
    parallel_vec_copy(outG.vLabels, vLabels);
    parallel_vec_copy(outG.deleted, deleted);
    #pragma omp parallel for schedule(dynamic)
    for (intV i=0; i<numT; i++)
    {
        intV newDeg = 0;
        if (deleted[i]==0)
        {
            //first pass: count surviving neighbors
            intV numNeigh = adj[i].size();
            for (intV j=0; j<numNeigh; j++)
            {
                intV neigh = adj[i][j];
                if (deleted[neigh]) continue;
                newDeg++;
            }
            outG.adj[i].resize(newDeg);
            //second pass: copy the surviving neighbors
            newDeg = 0;
            for (intV j=0; j<numNeigh; j++)
            {
                intV neigh = adj[i][j];
                if (deleted[neigh]) continue;
                outG.adj[i][newDeg] = neigh;
                newDeg++;
            }
        }
        outG.deleted[i] = deleted[i];
        outG.deg[i] = newDeg;
    }
    //each undirected edge was counted from both endpoints
    outG.numE = parallel_reduce<intE, intV>(outG.deg)/2;
}
//newLabels is the map from existing to new labels
//eg. newLabels = [1, 2, 0] means vertex 0 is now 1,
//vertex 1 is now 2 and vertex 2 is now 0.
void Graph::reorder_in_place(std::vector<intV> &newLabels)
{
    //build relabeled copies of every per-vertex structure, then swap
    //them into this object
    std::vector<std::vector<intV>> adjNew (numT);
    std::vector<intV> uNew (numU);
    std::vector<intV> vNew (numV);
    std::vector<intV> degNew (numT);
    std::vector<uint8_t> delNew (numT);
    #pragma omp parallel for
    for (intV i=0; i<numU; i++)
        uNew[i] = newLabels[uLabels[i]];
    #pragma omp parallel for
    for (intV i=0; i<numV; i++)
        vNew[i] = newLabels[vLabels[i]];
    parallel_sort_indices(uNew, std::less<intV>());
    parallel_sort_indices(vNew, std::less<intV>());
    #pragma omp parallel for schedule(dynamic, NUM_THREADS)
    for (intV i=0; i<numT; i++)
    {
        //capture the size before the swap empties adj[i]
        intV numNeigh = adj[i].size();
        // adjNew[newLabels[i]].resize(numNeigh);
        //swap (not copy) the list into its new slot, then relabel in place
        adjNew[newLabels[i]].swap(adj[i]);
        for (intV j=0; j<numNeigh; j++)
            adjNew[newLabels[i]][j] = newLabels[adjNew[newLabels[i]][j]];
        // adjNew[newLabels[i]][j] = newLabels[adj[i][j]];
        degNew[newLabels[i]] = deg[i];
        delNew[newLabels[i]] = deleted[i];
    }
    uNew.swap(uLabels);
    vNew.swap(vLabels);
    adjNew.swap(adj);
    degNew.swap(deg);
    delNew.swap(deleted);
    sort_adj();
}
void Graph::read_graph_bin(std::string &filename, int peelSide)
{
FILE* fp = fopen(filename.c_str(), "rb");
if (fp==NULL)
{
fputs("file error\n", stderr);
exit(EXIT_FAILURE);
}
fread(&numU, sizeof(intV), 1, fp);
fread(&numV, sizeof(intV), 1, fp);
if (peelSide==1)
std::swap(numU, numV);
numT = numU + numV;
intE numEdgesRead = 0;
fread(&numEdgesRead, sizeof(intE), 1, fp);
std::vector<intV> uList (numEdgesRead);
std::vector<intV> vList (numEdgesRead);
fread(&uList[0], sizeof(intV), numEdgesRead, fp);
fread(&vList[0], sizeof(intV), numEdgesRead, fp);
if (peelSide==1)
uList.swap(vList);
uLabels.resize(numU);
for (intV i=0; i<numU; i++)
uLabels[i] = i;
//in Koblenz format, both U and V indices start from 1
//to distinguish, add numU to V indices
vLabels.resize(numV);
for (intV i=0; i<numV; i++)
vLabels[i] = i+numU;
adj.resize(numT);
//#pragma omp parallel for
for (intE i=0; i<numEdgesRead; i++)
{
intV u = uList[i]; intV v = vList[i] + numU;
adj[u].push_back(v);
adj[v].push_back(u);
}
//printf("adjacency list created\n");
deleted.resize(numT, false);
remove_duplicates();
fclose(fp);
}
//Read a bipartite edge list in text (Koblenz-style) format.  Lines starting
//with '%' are comments; each remaining line holds "u v".  If peelSide==1 the
//two columns are swapped so the peeled side is always stored as U.
void Graph::read_graph(std::string &filename, int peelSide)
{
    FILE* fp = fopen(filename.c_str(), "r");
    if (fp==NULL)
    {
        fputs("file error\n", stderr);
        exit(EXIT_FAILURE);
    }
    //ignore sentences starting with "%"
    fpos_t position;
    char buf[256] = "";  //initialized so a failed fgets cannot leave it indeterminate
    fgetpos(fp, &position);
    if (fgets(buf, sizeof(buf), fp) == NULL)
        buf[0] = '\0';
    while((buf[0]=='%') && !feof(fp))
    {
        fgetpos(fp, &position);
        if (fgets(buf, sizeof(buf), fp) == NULL)
            buf[0] = '\0';
    }
    if (feof(fp))
    {
        fclose(fp);  //do not leak the handle on an all-comment/empty file
        return;
    }
    fsetpos(fp, &position);
    std::vector<std::pair<intV, intV>> edges;
    intV u, v;
    while(!feof(fp))
    {
        //intV is unsigned, so scan with %u (reading into an unsigned with
        //%d is undefined behavior)
        if (fscanf(fp, "%u", &u) <= 0)
            break;
        if (fscanf(fp, "%u", &v) <= 0)
            break;
        if (peelSide==1)
            std::swap(u, v);
        numU = std::max(u+1, numU);
        numV = std::max(v+1, numV);
        edges.push_back(std::make_pair(u, v));
    }
    fclose(fp);
    numT = numU + numV;
    uLabels.resize(numU);
    for (intV i=0; i<numU; i++)
        uLabels[i] = i;
    //in Koblenz format, both U and V indices start from 1
    //to distinguish, add numU to V indices
    vLabels.resize(numV);
    for (intV i=0; i<numV; i++)
        vLabels[i] = i+numU;
    adj.resize(numT);
    intE numEdgesRead = edges.size();
    for (intE i=0; i<numEdgesRead; i++)
    {
        u = edges[i].first; v = edges[i].second+numU;
        adj[u].push_back(v);
        adj[v].push_back(u);
    }
    deleted.resize(numT, false);
    remove_duplicates();
}
inline std::vector<intV>& Graph::get_neigh(intV v, intV °V)
{
degV = deg[v];
return adj[v];
}
//remove duplicate edges (also computes final degree)
void Graph::remove_duplicates()
{
    deg.resize(numT);
    //sorting brings duplicate neighbors next to each other
    sort_adj();
    #pragma omp parallel for schedule(dynamic)
    for (intV i=0; i<numT; i++)
    {
        //count unique neighbors
        intV numNeigh = 0;
        for (intV j=0; j<adj[i].size(); j++)
        {
            if (j==0)
                numNeigh++;
            else if (adj[i][j] != adj[i][j-1])
                numNeigh++;
        }
        //set degree
        deg[i] = numNeigh;
        //compact the list only when duplicates were actually found
        if(numNeigh!=adj[i].size())
        {
            std::vector<intV> vec(numNeigh);
            numNeigh = 0;
            for (intV j=0; j<adj[i].size(); j++)
            {
                if (j==0)
                    vec[numNeigh++] = adj[i][j];
                else if (adj[i][j] != adj[i][j-1])
                    vec[numNeigh++] = adj[i][j];
            }
            vec.swap(adj[i]);
        }
        //assert(deg[i]==adj[i].size());
    }
    //each undirected edge contributes to two degrees
    intE tempE = 0;
    #pragma omp parallel for reduction(+:tempE)
    for (intV i=0; i<numT; i++)
        tempE += deg[i];
    numE = tempE/2;
}
//True when vertex v has been marked deleted.
inline bool Graph::is_deleted(intV v)
{
    return (deleted[v] == 1);
}
//Mark vertex v as deleted (lazy deletion; adjacency lists are untouched).
inline void Graph::delete_vertex(intV v)
{
    deleted[v] = 1;
}
//Clear the deletion flag of vertex v.
inline void Graph::restore_vertex(intV v)
{
    deleted[v] = 0;
}
//delete edges incident on deleted vertices
//assuming only vertices on 'U' side are deleted
inline void Graph::delete_edges()
{
    //deleted U vertices lose all of their edges
    #pragma omp parallel for
    for (intV i=0; i<numU; i++)
    {
        intV vId = uLabels[i];
        if (is_deleted(vId))
            deg[vId] = 0;
    }
    //for each V vertex, partition its list so surviving neighbors occupy
    //the first deg[vId] slots, then re-sort that active prefix
    #pragma omp parallel for schedule(static, 1)
    for (intV i=0; i<numV; i++)
    {
        intV vId = vLabels[i];
        intV start = 0;
        intV end = deg[vId];
        while(start != end)
        {
            if (is_deleted(adj[vId][start]))
            {
                //swap the deleted neighbor past the active range
                end--;
                std::swap(adj[vId][start], adj[vId][end]);
            }
            else
                start++;
        }
        deg[vId] = end;
        std::sort(adj[vId].begin(), adj[vId].begin() + deg[vId]);
    }
}
//restore all edges
//assuming only vertices on 'U' side were deleted
inline void Graph::restore_edges()
{
#pragma omp parallel for
for (intV i=0; i<numT; i++)
deg[i] = adj[i].size();
//#pragma omp parallel for
//for (intV i=0; i<numU; i++)
//{
// intV vId = uLabels[i];
// deg[vId] = adj[vId].size();
//}
//#pragma omp parallel for schedule(static, 1)
//for (intV i=0; i<numV; i++)
//{
// intV vId = vLabels[i];
// if(deg[vId] != adj[vId].size())
// {
// deg[vId] = adj[vId].size();
// //sorting not required if we use "delete_edges()"
// //before every partition <TODO>
// std::sort(adj[vId].begin(), adj[vId].begin() + deg[vId]);
// }
//}
}
//Write the U-side edge list to "dump.txt", one "u v" pair per line, with
//V-side ids shifted back to their original 0-based range.
void Graph::dump_graph()
{
    FILE* fp = fopen("dump.txt", "w");
    if (fp==NULL)
    {
        //fail gracefully instead of dereferencing a NULL FILE*
        fputs("file error\n", stderr);
        return;
    }
    for (intV i=0; i<numU; i++)
    {
        //intV is unsigned, so print with %u (printing an unsigned with %d
        //is undefined behavior)
        for (intV j=0; j<deg[i]; j++)
            fprintf(fp,"%u %u\n", i, adj[i][j]-numU);
    }
    fclose(fp);
}
//Print the U-side edge list to stdout, one "u v" pair per line
//(V-side ids are printed with the numU offset still applied).
void Graph::print_graph()
{
    for (intV i=0; i<numU; i++)
    {
        //intV is unsigned, so print with %u (printing an unsigned with %d
        //is undefined behavior)
        for (intV j=0; j<deg[i]; j++)
            printf("%u %u\n", i, adj[i][j]);
    }
}
//Copy the requested side's vertex labels into 'labels' (0 = U side,
//anything else = V side).
void Graph::get_labels(std::vector<intV> &labels, int side)
{
    if (side==0)
        parallel_vec_copy(labels, uLabels);
    else
        parallel_vec_copy(labels, vLabels);
}
#endif
|
Example_threadprivate.2.c | /*
* @@name: threadprivate.2c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
/* Each thread keeps its own private copy of the counter (threadprivate);
 * every call bumps the calling thread's copy and returns the new value. */
int increment_counter_2()
{
  static int counter = 0;
  #pragma omp threadprivate(counter)
  return ++counter;
}
|
hypre_prefix_sum.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Jongsoo Park et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_utilities.h"
/*
 * Exclusive prefix sum across the threads of the enclosing parallel region.
 * On entry *in_out holds this thread's contribution; on exit it holds the
 * sum of the contributions of all lower-numbered threads, and *sum holds
 * the total over all threads.  workspace must be shared by all threads and
 * hold at least num_threads+1 values.  Every thread of the region must call
 * this function (it contains barriers).
 */
void hypre_prefix_sum(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int *workspace)
{
#ifdef HYPRE_USING_OPENMP
   HYPRE_Int my_thread_num = hypre_GetThreadNum();
   HYPRE_Int num_threads = hypre_NumActiveThreads();
   hypre_assert(1 == num_threads || omp_in_parallel());

   /* each thread publishes its contribution at slot my_thread_num + 1 */
   workspace[my_thread_num + 1] = *in_out;
#pragma omp barrier
#pragma omp master
   {
      HYPRE_Int i;
      workspace[0] = 0;
      /* serial scan over the per-thread contributions */
      for (i = 1; i < num_threads; i++)
      {
         workspace[i + 1] += workspace[i];
      }
      *sum = workspace[num_threads];
   }
#pragma omp barrier
   /* workspace[t] now holds the exclusive prefix for thread t */
   *in_out = workspace[my_thread_num];
#else /* !HYPRE_USING_OPENMP */
   *sum = *in_out;
   *in_out = 0;
   workspace[0] = 0;
   workspace[1] = *sum;
#endif /* !HYPRE_USING_OPENMP */
}
/*
 * hypre_prefix_sum_pair: same as hypre_prefix_sum, but scans two
 * independent values in a single pass over the barriers.
 * On exit, *in_outK holds this thread's exclusive prefix and *sumK the
 * total, for K = 1, 2.
 * workspace must be shared and hold at least 2*(num_threads + 1)
 * entries; the two scans are interleaved in pairs.
 */
void hypre_prefix_sum_pair(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2, HYPRE_Int *sum2, HYPRE_Int *workspace)
{
#ifdef HYPRE_USING_OPENMP
HYPRE_Int my_thread_num = hypre_GetThreadNum();
HYPRE_Int num_threads = hypre_NumActiveThreads();
hypre_assert(1 == num_threads || omp_in_parallel());
/* thread t deposits its pair at slots 2*(t+1) and 2*(t+1)+1 */
workspace[(my_thread_num + 1)*2] = *in_out1;
workspace[(my_thread_num + 1)*2 + 1] = *in_out2;
#pragma omp barrier
#pragma omp master
{
HYPRE_Int i;
workspace[0] = 0;
workspace[1] = 0;
/* serial interleaved scan over the per-thread slots */
for (i = 1; i < num_threads; i++)
{
workspace[(i + 1)*2] += workspace[i*2];
workspace[(i + 1)*2 + 1] += workspace[i*2 + 1];
}
*sum1 = workspace[num_threads*2];
*sum2 = workspace[num_threads*2 + 1];
}
#pragma omp barrier
/* slots 2*t and 2*t+1 now hold thread t's exclusive prefixes */
*in_out1 = workspace[my_thread_num*2];
*in_out2 = workspace[my_thread_num*2 + 1];
#else /* !HYPRE_USING_OPENMP */
/* serial fallback: totals are the single contributions, prefixes 0 */
*sum1 = *in_out1;
*sum2 = *in_out2;
*in_out1 = 0;
*in_out2 = 0;
workspace[0] = 0;
workspace[1] = 0;
workspace[2] = *sum1;
workspace[3] = *sum2;
#endif /* !HYPRE_USING_OPENMP */
}
/*
 * hypre_prefix_sum_triple: same as hypre_prefix_sum, but scans three
 * independent values in a single pass over the barriers.
 * On exit, *in_outK holds this thread's exclusive prefix and *sumK the
 * total, for K = 1, 2, 3.
 * workspace must be shared and hold at least 3*(num_threads + 1)
 * entries; the three scans are interleaved in triples.
 */
void hypre_prefix_sum_triple(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2, HYPRE_Int *sum2, HYPRE_Int *in_out3, HYPRE_Int *sum3, HYPRE_Int *workspace)
{
#ifdef HYPRE_USING_OPENMP
HYPRE_Int my_thread_num = hypre_GetThreadNum();
HYPRE_Int num_threads = hypre_NumActiveThreads();
hypre_assert(1 == num_threads || omp_in_parallel());
/* thread t deposits its triple at slots 3*(t+1) .. 3*(t+1)+2 */
workspace[(my_thread_num + 1)*3] = *in_out1;
workspace[(my_thread_num + 1)*3 + 1] = *in_out2;
workspace[(my_thread_num + 1)*3 + 2] = *in_out3;
#pragma omp barrier
#pragma omp master
{
HYPRE_Int i;
workspace[0] = 0;
workspace[1] = 0;
workspace[2] = 0;
/* serial interleaved scan over the per-thread slots */
for (i = 1; i < num_threads; i++)
{
workspace[(i + 1)*3] += workspace[i*3];
workspace[(i + 1)*3 + 1] += workspace[i*3 + 1];
workspace[(i + 1)*3 + 2] += workspace[i*3 + 2];
}
*sum1 = workspace[num_threads*3];
*sum2 = workspace[num_threads*3 + 1];
*sum3 = workspace[num_threads*3 + 2];
}
#pragma omp barrier
/* slots 3*t .. 3*t+2 now hold thread t's exclusive prefixes */
*in_out1 = workspace[my_thread_num*3];
*in_out2 = workspace[my_thread_num*3 + 1];
*in_out3 = workspace[my_thread_num*3 + 2];
#else /* !HYPRE_USING_OPENMP */
/* serial fallback: totals are the single contributions, prefixes 0 */
*sum1 = *in_out1;
*sum2 = *in_out2;
*sum3 = *in_out3;
*in_out1 = 0;
*in_out2 = 0;
*in_out3 = 0;
workspace[0] = 0;
workspace[1] = 0;
workspace[2] = 0;
workspace[3] = *sum1;
workspace[4] = *sum2;
workspace[5] = *sum3;
#endif /* !HYPRE_USING_OPENMP */
}
/*
 * hypre_prefix_sum_multiple: generalization of hypre_prefix_sum to n
 * independent values per thread (in_out[0..n-1], sum[0..n-1]).
 * On exit, in_out[i] holds this thread's exclusive prefix of value i
 * and sum[i] the total across all threads.
 * workspace must be shared and hold at least n*(num_threads + 1)
 * entries, laid out as one n-vector per thread slot.
 */
void hypre_prefix_sum_multiple(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int n, HYPRE_Int *workspace)
{
HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
HYPRE_Int my_thread_num = hypre_GetThreadNum();
HYPRE_Int num_threads = hypre_NumActiveThreads();
hypre_assert(1 == num_threads || omp_in_parallel());
/* thread t deposits its n values at slots (t+1)*n .. (t+1)*n + n-1 */
for (i = 0; i < n; i++)
{
workspace[(my_thread_num + 1)*n + i] = in_out[i];
}
#pragma omp barrier
#pragma omp master
{
HYPRE_Int t;
for (i = 0; i < n; i++)
{
workspace[i] = 0;
}
// assuming n is not so big, we don't parallelize this loop
for (t = 1; t < num_threads; t++)
{
for (i = 0; i < n; i++)
{
workspace[(t + 1)*n + i] += workspace[t*n + i];
}
}
for (i = 0; i < n; i++)
{
sum[i] = workspace[num_threads*n + i];
}
}
#pragma omp barrier
/* slots t*n .. t*n + n-1 now hold thread t's exclusive prefixes */
for (i = 0; i < n; i++)
{
in_out[i] = workspace[my_thread_num*n + i];
}
#else /* !HYPRE_USING_OPENMP */
/* serial fallback: totals are the single contributions, prefixes 0 */
for (i = 0; i < n; i++)
{
sum[i] = in_out[i];
in_out[i] = 0;
workspace[i] = 0;
workspace[n + i] = sum[i];
}
#endif /* !HYPRE_USING_OPENMP */
}
|
cityblock.c | #include <math.h>
/*
 * cbdm: dense city-block (L1 / Manhattan) distance matrix.
 *
 * a: num_rows x num_cols matrix (row-major)
 * b: num_rows x num_cols matrix (row-major)
 * r: num_rows x num_rows output, r[i][j] = sum_k |a[i][k] - b[j][k]|
 *
 * BUG FIX: the original declared a single function-scope accumulator
 * `_r` and attached `reduction(+:_r)` to the parallel loop.  The value
 * is a per-(i,j) temporary, not a sum over the parallel iterations, so
 * the reduction clause was a misuse (it only produced correct output
 * because each thread's private copy was reset per j).  Declaring the
 * accumulator inside the loop body makes it naturally private and makes
 * the intent explicit.
 */
void cbdm(double *a, double *b, double *r, int num_rows, int num_cols) {
    #pragma omp parallel for
    for (int i = 0; i < num_rows; i++) {
        for (int j = 0; j < num_rows; j++) {
            /* loop-local => private to each thread, no clause needed */
            double acc = 0.0;
            for (int k = 0; k < num_cols; k++) {
                acc += fabs(a[i * num_cols + k] - b[j * num_cols + k]);
            }
            r[i * num_rows + j] = acc;
        }
    }
}
|
ast-dump-openmp-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { /* NOTE: the CHECK lines below match exact source line/column positions; do not add or reflow lines */
#pragma omp parallel for simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { /* no collapse clause: per the CHECK dump, the inner ForStmt is the structured block of the outer loop */
#pragma omp parallel for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { /* collapse(1): same association as test_two, as the matching CHECK lines show */
#pragma omp parallel for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { /* collapse(2): both loops associated; the CHECK dump marks the NullStmt as the structured block */
#pragma omp parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { /* collapse(2) with three loops: the CHECK dump marks the innermost ForStmt as the structured block */
#pragma omp parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:4:1, col:30>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:10:1, col:30>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:17:1, col:42>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:31, col:41>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:40> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:40> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:24:1, col:42>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:31, col:41>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:40> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:40> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPParallelForSimdDirective {{.*}} <line:31:1, col:42>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:31, col:41>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:40> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:40> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
bli_gemm_ref.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
#if 1
// An implementation that attempts to facilitate emission of vectorized
// instructions via constant loop bounds + #pragma omp simd directives.
// GENTFUNC expands to one complete gemm micro-kernel for the given ctype:
// the register block sizes (mr, nr) are macro arguments, so every loop
// below has a compile-time-constant trip count for the "omp simd" loops.
// (No comments are inserted inside the macro body itself, since every
// inserted line there would need its own '\' continuation.)
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf, mr, nr ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
dim_t k, \
ctype* restrict alpha, \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict beta, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* restrict data, \
cntx_t* restrict cntx \
) \
{ \
ctype ab[ BLIS_STACK_BUF_MAX_SIZE \
/ sizeof( ctype ) ] \
__attribute__((aligned(BLIS_STACK_BUF_ALIGN_SIZE))); \
const inc_t rs_ab = nr; \
const inc_t cs_ab = 1; \
\
const inc_t cs_a = mr; \
const inc_t rs_b = nr; \
\
\
/* Initialize the accumulator elements in ab to zero. */ \
_Pragma( "omp simd" ) \
for ( dim_t i = 0; i < mr * nr; ++i ) \
{ \
PASTEMAC(ch,set0s)( ab[ i ] ); \
} \
\
/* Perform a series of k rank-1 updates into ab. */ \
for ( dim_t l = 0; l < k; ++l ) \
{ \
for ( dim_t i = 0; i < mr; ++i ) \
{ \
_Pragma( "omp simd" ) \
for ( dim_t j = 0; j < nr; ++j ) \
{ \
PASTEMAC(ch,dots) \
( \
a[ i ], \
b[ j ], \
ab[ i*rs_ab + j*cs_ab ] \
); \
} \
} \
\
a += cs_a; \
b += rs_b; \
} \
\
/* Scale the result in ab by alpha. */ \
_Pragma( "omp simd" ) \
for ( dim_t i = 0; i < mr * nr; ++i ) \
{ \
PASTEMAC(ch,scals)( *alpha, ab[ i ] ); \
} \
\
/* Output/accumulate intermediate result ab based on the storage
of c and the value of beta. */ \
if ( cs_c == 1 ) \
{ \
/* C is row-stored. */ \
\
if ( PASTEMAC(ch,eq0)( *beta ) ) \
{ \
for ( dim_t i = 0; i < mr; ++i ) \
for ( dim_t j = 0; j < nr; ++j ) \
PASTEMAC(ch,copys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
c [ i*rs_c + j*1 ] \
); \
} \
else \
{ \
for ( dim_t i = 0; i < mr; ++i ) \
for ( dim_t j = 0; j < nr; ++j ) \
PASTEMAC(ch,xpbys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
*beta, \
c [ i*rs_c + j*1 ] \
); \
} \
} \
else \
{ \
/* C is column-stored or general-stored. */ \
\
if ( PASTEMAC(ch,eq0)( *beta ) ) \
{ \
for ( dim_t j = 0; j < nr; ++j ) \
for ( dim_t i = 0; i < mr; ++i ) \
PASTEMAC(ch,copys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
c [ i*rs_c + j*cs_c ] \
); \
} \
else \
{ \
for ( dim_t j = 0; j < nr; ++j ) \
for ( dim_t i = 0; i < mr; ++i ) \
PASTEMAC(ch,xpbys) \
( \
ab[ i*rs_ab + j*cs_ab ], \
*beta, \
c [ i*rs_c + j*cs_c ] \
); \
} \
} \
}
//INSERT_GENTFUNC_BASIC2( gemm, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
// Instantiate the kernel for the four standard BLIS datatypes; the last
// two arguments are the per-type (mr, nr) register blocking factors.
GENTFUNC( float, s, gemm, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 16 )
GENTFUNC( double, d, gemm, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( scomplex, c, gemm, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( dcomplex, z, gemm, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 4 )
#else
// An implementation that uses variable loop bounds (queried from the context)
// and makes no use of #pragma omp simd.
// Here mr/nr come from the runtime cntx_t, so loop bounds are not
// compile-time constants and the compiler is left to vectorize (or not)
// on its own.
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
dim_t k, \
ctype* restrict alpha, \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict beta, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* restrict data, \
cntx_t* restrict cntx \
) \
{ \
const num_t dt = PASTEMAC(ch,type); \
\
const dim_t mr = bli_cntx_get_blksz_def_dt( dt, BLIS_MR, cntx ); \
const dim_t nr = bli_cntx_get_blksz_def_dt( dt, BLIS_NR, cntx ); \
\
const inc_t packmr = bli_cntx_get_blksz_max_dt( dt, BLIS_MR, cntx ); \
const inc_t packnr = bli_cntx_get_blksz_max_dt( dt, BLIS_NR, cntx ); \
\
const dim_t m = mr; \
const dim_t n = nr; \
\
const inc_t cs_a = packmr; \
\
const inc_t rs_b = packnr; \
\
ctype ab[ BLIS_STACK_BUF_MAX_SIZE \
/ sizeof( ctype ) ] \
__attribute__((aligned(BLIS_STACK_BUF_ALIGN_SIZE))); \
const inc_t rs_ab = 1; \
const inc_t cs_ab = mr; \
\
dim_t l, j, i; \
\
ctype ai; \
ctype bj; \
\
\
/* Initialize the accumulator elements in ab to zero. */ \
for ( i = 0; i < m * n; ++i ) \
{ \
PASTEMAC(ch,set0s)( *(ab + i) ); \
} \
\
/* Perform a series of k rank-1 updates into ab. */ \
for ( l = 0; l < k; ++l ) \
{ \
ctype* restrict abij = ab; \
\
/* In an optimized implementation, these two loops over MR and NR
are typically fully unrolled. */ \
for ( j = 0; j < n; ++j ) \
{ \
bj = *(b + j); \
\
for ( i = 0; i < m; ++i ) \
{ \
ai = *(a + i); \
\
PASTEMAC(ch,dots)( ai, bj, *abij ); \
\
abij += rs_ab; \
} \
} \
\
a += cs_a; \
b += rs_b; \
} \
\
/* Scale the result in ab by alpha. */ \
for ( i = 0; i < m * n; ++i ) \
{ \
PASTEMAC(ch,scals)( *alpha, *(ab + i) ); \
} \
\
/* If beta is zero, overwrite c with the scaled result in ab. Otherwise,
scale by beta and then add the scaled result in ab. */ \
if ( PASTEMAC(ch,eq0)( *beta ) ) \
{ \
PASTEMAC(ch,copys_mxn)( m, \
n, \
ab, rs_ab, cs_ab, \
c, rs_c, cs_c ); \
} \
else \
{ \
PASTEMAC(ch,xpbys_mxn)( m, \
n, \
ab, rs_ab, cs_ab, \
beta, \
c, rs_c, cs_c ); \
} \
}
// One instantiation per standard datatype (s, d, c, z).
INSERT_GENTFUNC_BASIC2( gemm, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>   /* for omp_get_max_threads() used in main() */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is normalized in place while handling the microsecond carry, so the
 * caller must not rely on Y being preserved.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Carry whole seconds out of the microsecond field of y so that
       0 <= x->tv_usec - y->tv_usec < 1000000 afterwards. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec of the difference is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x precedes y in whole seconds after normalization. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver for the order-1 3D 7-point stencil.
 * Usage: prog [Nx Ny Nz [Nt]]  (interior sizes; +2 halo added per dim)
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized (UB) when fewer arguments were
 *    given; they now have defaults.
 *  - The init loops started at index 1 and never touched A[1], so the
 *    stencil read uninitialized halo values (UB); both buffers are now
 *    initialized over the full domain.
 *  - `min(...)` was called but the macro defined above is `MIN`.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Defaults (incl. the 2 halo points) used when not given on the
       command line. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Double-buffered grid A[2][Nz][Ny][Nx] for the time dimension. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 32;
    tile_size[1] = 32;
    tile_size[2] = 8;
    tile_size[3] = 256;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize BOTH time buffers over the FULL domain (including the
       halo at indices 0 and N-1) so the stencil never reads an
       uninitialized value. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    /* kept because the reporting macros from print_utils.h may reference
       it — TODO confirm */
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: was `min(...)`; the macro defined above is MIN. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return;  /* timing status currently unused */

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (causes performance degradation, left disabled)
    /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
    free(A[0][i][j]);
    free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
GB_unaryop__abs_uint32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_fp32
// op(A') function: GB_tran__abs_uint32_fp32
// C type: uint32_t
// A type: float
// cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z ; GB_CAST_UNSIGNED(z,aij,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cast (Ax [p]) for all p: apply the ABS operator with a
// float -> uint32_t typecast.  Per the header above, the unary op itself is
// the identity (cij = aij); the GB_CAST_UNSIGNED macro (via GB_CASTING)
// performs the actual float-to-unsigned conversion.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_control.h.
GrB_Info GB_unop__abs_uint32_fp32
(
uint32_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz, // number of entries in Ax (and Cx)
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// entries are independent, so a static schedule partitions them evenly
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ; // Cx [p] = cast (Ax [p])
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float to uint32_t, and apply the
// unary operator.  The actual loop lives in the shared template
// GB_unaryop_transpose.c, which is specialized here via the GB_* macros
// defined above; GB_PHASE_2_OF_2 selects the numeric (second) phase.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_control.h.
GrB_Info GB_tran__abs_uint32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts from phase 1
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice, // partition of A across naslice slices
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: quanwang@openailab.com
*/
#include "conv_kernel_x86.h"
#include "wino_conv_kernel_x86.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#if __AVX__
#include <immintrin.h>
#endif
#ifndef _MSC_VER
#include <sys/time.h>
#endif
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
/* Size in bytes of the private weight (interleave) buffer for this filter.
 * uint8 weights are dequantized to fp32 by interleave_uint8, so they need
 * 4x the raw storage; every other dtype is copied verbatim. */
static int get_private_mem_size(struct tensor* filter)
{
    int raw_bytes = filter->elem_num * filter->elem_size;
    /* simulator runs uint8 inference in fp32: 4 bytes per element */
    return (filter->data_type == TENGINE_DT_UINT8) ? raw_bytes * 4 : raw_bytes;
}
/* fp32 weights need no layout transform on x86: copy the kernel data
 * verbatim into the private interleave buffer. */
static void interleave(struct tensor* filter, struct conv_priv_info* priv_info)
{
    void* dst = priv_info->interleave_buffer;
    const void* src = filter->data;
    memcpy(dst, src, filter->elem_num * filter->elem_size);
}
/* Dequantize uint8 weights to fp32 for the simulated uint8 path:
 *   w_fp32 = ((float)w_u8 - zero_point) * scale
 * The fp32 result is written into the private interleave buffer. */
static void interleave_uint8(struct tensor* filter, struct conv_priv_info* priv_info)
{
    float* dst = (float*)priv_info->interleave_buffer;
    const uint8_t* src = (const uint8_t*)filter->data;
    const float scale = filter->scale;
    const float zp = (float)filter->zero_point;
    const int count = filter->elem_num;
    for (int i = 0; i < count; i++)
    {
        dst[i] = ((float)src[i] - zp) * scale;
    }
}
/* im2col for fp32: expand the (inc x inh x inw) image into a
 * (inc*ksize_h*ksize_w) x (outh*outw) column matrix so convolution becomes
 * a GEMM.  Out-of-image taps (padding) are written as zeros.
 *
 * data_img : input image, CHW layout
 * data_col : output column buffer; row c covers one (channel, kh, kw) tap
 * sh/sw    : strides, ph/pw: pads, dh/dw: dilations
 *
 * Fix vs. the original: the input pointer previously started at
 * (w_low - 1) * sw, i.e. one stride *before* the first valid sample, which
 * forms an out-of-bounds pointer (undefined behavior in C) whenever
 * im_col < 0.  It now starts at the first in-bounds sample and advances
 * after each read.  The min/max clamps are spelled out inline so this
 * function does not depend on the file-level macros.
 */
void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h,
                 int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;
    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose row index c into (input channel c_, kernel row kh, kernel col kw) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        /* [w_low, w_high) = output columns whose sampled input column
         * im_col + w*sw lies inside [0, inw); outside it we write zeros */
        int w_low = -im_col / sw + (-im_col % sw > 0);
        if (w_low < 0)
            w_low = 0;
        int w_high = (inw - im_col) / sw + ((inw - im_col) % sw > 0);
        if (w_high > outw)
            w_high = outw;
        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;
            if (im_row >= 0 && im_row < inh)
            {
                /* first in-bounds sample; never form a pointer before the row */
                const float* in = data_img + inw * (im_row + inh * c_) + im_col + w_low * sw;
                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    *(out++) = *in;
                    in += sw;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                /* sampled row is entirely outside the image: all padding */
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}
/* im2col for uint8 input, dequantized on the fly to fp32:
 *   out = ((float)q - zero_point) * scale
 * Geometry (kernel, stride, pad, dilation) comes from `param`; spatial
 * dims come from the NCHW tensor descriptors.  Padding taps are zeros.
 *
 * Fix vs. the original: the input pointer previously started at
 * (w_low - 1) * sw, one stride before the first valid sample, which forms
 * an out-of-bounds pointer (undefined behavior) whenever im_col < 0.  It
 * now starts at the first in-bounds sample and advances after each read.
 */
void im2col_uint8(uint8_t* data_img, float* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    float scale = input_tensor->scale;
    int zero_point = input_tensor->zero_point;
    const int channels_col = ksize_h * ksize_w * inc;
    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose row index c into (input channel c_, kernel row kh, kernel col kw) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        /* [w_low, w_high) = output columns whose sampled input column is in [0, inw) */
        int w_low = -im_col / sw + (-im_col % sw > 0);
        if (w_low < 0)
            w_low = 0;
        int w_high = (inw - im_col) / sw + ((inw - im_col) % sw > 0);
        if (w_high > outw)
            w_high = outw;
        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;
            if (im_row >= 0 && im_row < inh)
            {
                /* first in-bounds sample; never form a pointer before the row */
                const uint8_t* in = data_img + inw * (im_row + inh * c_) + im_col + w_low * sw;
                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    /* dequantize: (q - zero_point) * scale */
                    *(out++) = ((float)in[0] - (float)zero_point) * scale;
                    in += sw;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                /* sampled row is entirely outside the image: all padding */
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}
/* im2col for int8 input: same layout expansion as im2col_fp32 but the
 * quantized values are copied as-is (no dequantization).  Geometry comes
 * from `param`; spatial dims from the NCHW tensor descriptors.  Padding
 * taps are zeros.
 *
 * Fix vs. the original: the input pointer previously started at
 * (w_low - 1) * sw, one stride before the first valid sample, which forms
 * an out-of-bounds pointer (undefined behavior) whenever im_col < 0.  It
 * now starts at the first in-bounds sample and advances after each read.
 */
void im2col_int8(int8_t* data_img, int8_t* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    const int channels_col = ksize_h * ksize_w * inc;
    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose row index c into (input channel c_, kernel row kh, kernel col kw) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        /* [w_low, w_high) = output columns whose sampled input column is in [0, inw) */
        int w_low = -im_col / sw + (-im_col % sw > 0);
        if (w_low < 0)
            w_low = 0;
        int w_high = (inw - im_col) / sw + ((inw - im_col) % sw > 0);
        if (w_high > outw)
            w_high = outw;
        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            int8_t* out = data_col + (c * outh + h) * outw;
            const int8_t* end = out + w_high;
            if (im_row >= 0 && im_row < inh)
            {
                /* first in-bounds sample; never form a pointer before the row */
                const int8_t* in = data_img + inw * (im_row + inh * c_) + im_col + w_low * sw;
                memset(out, 0, w_low * sizeof(int8_t));
                out += w_low;
                while (out < end)
                {
                    *(out++) = *in;
                    in += sw;
                }
                memset(out, 0, (outw - w_high) * sizeof(int8_t));
            }
            else
            {
                /* sampled row is entirely outside the image: all padding */
                memset(out, 0, outw * sizeof(int8_t));
            }
        }
    }
}
static void im2col_ir(struct tensor* input, struct tensor* output, struct conv_priv_info* priv_info,
struct conv_param* param, int n, int group)
{
int input_chan = param->input_channel / param->group;
int image_size = input->dims[1] * input->dims[2] * input->dims[3];
int group_size = input_chan * input->dims[2] * input->dims[3];
void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size);
void* im2col_buf = (void*)priv_info->im2col_buffer;
if (input->data_type == TENGINE_DT_FP32)
{
im2col_fp32((float*)input_base, (float*)im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3],
param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
}
else if (input->data_type == TENGINE_DT_UINT8)
{
im2col_uint8((uint8_t*)input_base, (float*)im2col_buf, input, output, param);
}
else if (input->data_type == TENGINE_DT_INT8)
{
im2col_int8((int8_t*)input_base, (int8_t*)im2col_buf, input, output, param);
}
else
{
TLOG_ERR("Input data type %d not to be supported.\n", input->data_type);
}
}
/* Pack B (K x N, row-major) into the layout expected by sgemm_fp:
 * full 8-column panels are stored as [panel][k][8] contiguously; each
 * leftover column (N % 8) gets its own K-length strip at offset
 * (col / 8 + col % 8) * 8 * K, matching the vb indexing in the kernel. */
void input_pack4_fp32(int K, int N, float* pB, float* pB_t, int num_thread)
{
    int num_panels = N >> 3;
    int tail_start = num_panels << 3;
    // full 8-wide panels: [ch00, ch10, ..., ch70, ch01, ch11, ch21, ...]
    #pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < num_panels; p++)
    {
        int col = p * 8;
        const float* src = pB + col;
        float* dst = pB_t + (col / 8) * 8 * K;
        for (int k = 0; k < K; k++)
        {
#if __AVX__
            _mm256_storeu_ps(dst, _mm256_loadu_ps(src));
#else
            for (int t = 0; t < 8; t++)
                dst[t] = src[t];
#endif // __AVX__
            dst += 8;
            src += N;
        }
    }
    // leftover columns, one K-length strip each: [ch00, ch01, ch02, ...]
    #pragma omp parallel for num_threads(num_thread)
    for (int col = tail_start; col < N; col++)
    {
        const float* src = pB + col;
        float* dst = pB_t + (col / 8 + col % 8) * 8 * K;
        for (int k = 0; k < K; k++)
        {
            *dst++ = *src;
            src += N;
        }
    }
}
// sgemm_fp: pC (M x N) = pA_t (M x K, packed) * pB_t (K x N, packed).
// pA_t is the kernel matrix packed in 8-row panels (with 4-row and 1-row
// tails); pB_t is packed by input_pack4_fp32 (8-column panels plus
// per-column tails).  Output rows are produced in groups of 8, then 4,
// then 1; output columns in groups of 8, then 1.  AVX/FMA intrinsics are
// used when __AVX__ is defined, with a scalar fallback otherwise.
static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = M >> 3;
remain_outch_start = nn_outch << 3;
// ---- 8 output rows at a time ----
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 8;
float* output0 = pC + (i)*N;
float* output1 = pC + (i + 1) * N;
float* output2 = pC + (i + 2) * N;
float* output3 = pC + (i + 3) * N;
float* output4 = pC + (i + 4) * N;
float* output5 = pC + (i + 5) * N;
float* output6 = pC + (i + 6) * N;
float* output7 = pC + (i + 7) * N;
int j = 0;
// 8 output columns at a time; k loop is unrolled by 4
for (; j + 7 < N; j += 8)
{
float* va = pA_t + (i / 8) * 8 * K;
float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
__m256 _sum4 = _mm256_set1_ps(0.0);
__m256 _sum5 = _mm256_set1_ps(0.0);
__m256 _sum6 = _mm256_set1_ps(0.0);
__m256 _sum7 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70
va += 8;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41
_sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51
_sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61
_sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71
va += 8;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42
_sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52
_sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62
_sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72
va += 8;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43
_sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53
_sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63
_sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73
va += 8;
vb += 32;
}
// leftover k iterations (K % 4)
for (; k < K; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _va4 = _mm256_broadcast_ss(va + 4);
__m256 _va5 = _mm256_broadcast_ss(va + 5);
__m256 _va6 = _mm256_broadcast_ss(va + 6);
__m256 _va7 = _mm256_broadcast_ss(va + 7);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70
va += 8;
vb += 8;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
_mm256_storeu_ps(output4, _sum4);
_mm256_storeu_ps(output5, _sum5);
_mm256_storeu_ps(output6, _sum6);
_mm256_storeu_ps(output7, _sum7);
#else
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
float sum4[8] = {0};
float sum5[8] = {0};
float sum6[8] = {0};
float sum7[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
sum4[n] += va[4] * vb[n];
sum5[n] += va[5] * vb[n];
sum6[n] += va[6] * vb[n];
sum7[n] += va[7] * vb[n];
}
va += 8;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
output4[n] = sum4[n];
output5[n] = sum5[n];
output6[n] = sum6[n];
output7[n] = sum7[n];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
output4 += 8;
output5 += 8;
output6 += 8;
output7 += 8;
}
// leftover output columns (N % 8), one at a time
for (; j < N; j++)
{
float* va = pA_t + (i / 8) * 8 * K;
float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
__m256 _sum0_7 = _mm256_set1_ps(0.0);
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _vb1 = _mm256_broadcast_ss(vb + 1);
__m256 _vb2 = _mm256_broadcast_ss(vb + 2);
__m256 _vb3 = _mm256_broadcast_ss(vb + 3);
__m256 _va0 = _mm256_loadu_ps(va);
__m256 _va1 = _mm256_loadu_ps(va + 8);
__m256 _va2 = _mm256_loadu_ps(va + 16);
__m256 _va3 = _mm256_loadu_ps(va + 24);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00
_sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10
_sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20
_sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30
va += 32;
vb += 4;
}
_sum0 = _mm256_add_ps(_sum0, _sum1);
_sum2 = _mm256_add_ps(_sum2, _sum3);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum2);
for (; k < K; k++)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _va = _mm256_loadu_ps(va);
_sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00
va += 8;
vb += 1;
}
float output_sum0_7[8] = {0.f};
_mm256_storeu_ps(output_sum0_7, _sum0_7);
output0[0] = output_sum0_7[0];
output1[0] = output_sum0_7[1];
output2[0] = output_sum0_7[2];
output3[0] = output_sum0_7[3];
output4[0] = output_sum0_7[4];
output5[0] = output_sum0_7[5];
output6[0] = output_sum0_7[6];
output7[0] = output_sum0_7[7];
#else
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
sum4 += va[4] * vb[0];
sum5 += va[5] * vb[0];
sum6 += va[6] * vb[0];
sum7 += va[7] * vb[0];
va += 8;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output4[0] = sum4;
output5[0] = sum5;
output6[0] = sum6;
output7[0] = sum7;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
output4++;
output5++;
output6++;
output7++;
}
}
// ---- 4 output rows at a time (tail of the M dimension) ----
nn_outch = (M - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int i = remain_outch_start + pp * 4;
float* output0 = pC + (i)*N;
float* output1 = pC + (i + 1) * N;
float* output2 = pC + (i + 2) * N;
float* output3 = pC + (i + 3) * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
va += 4;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
va += 4;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
va += 4;
vb += 32;
}
for (; k < K; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
vb += 8;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
#else
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
}
// leftover output columns (N % 8) for the 4-row tail
for (; j < N; j++)
{
float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
__m128 _sum0_3 = _mm_set1_ps(0.0);
__m128 _sum0 = _mm_set1_ps(0.0);
__m128 _sum1 = _mm_set1_ps(0.0);
__m128 _sum2 = _mm_set1_ps(0.0);
__m128 _sum3 = _mm_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _vb1 = _mm_set1_ps(vb[1]);
__m128 _vb2 = _mm_set1_ps(vb[2]);
__m128 _vb3 = _mm_set1_ps(vb[3]);
__m128 _va0 = _mm_loadu_ps(va);
__m128 _va1 = _mm_loadu_ps(va + 4);
__m128 _va2 = _mm_loadu_ps(va + 8);
__m128 _va3 = _mm_loadu_ps(va + 12);
_sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00
_sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10
_sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20
_sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = _mm_add_ps(_sum0, _sum1);
_sum2 = _mm_add_ps(_sum2, _sum3);
_sum0_3 = _mm_add_ps(_sum0_3, _sum0);
_sum0_3 = _mm_add_ps(_sum0_3, _sum2);
for (; k < K; k++)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _va = _mm_loadu_ps(va);
_sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
float output_sum0_3[4] = {0.f};
_mm_storeu_ps(output_sum0_3, _sum0_3);
output0[0] = output_sum0_3[0];
output1[0] = output_sum0_3[1];
output2[0] = output_sum0_3[2];
output3[0] = output_sum0_3[3];
#else
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
}
}
remain_outch_start += nn_outch << 2;
// ---- remaining single output rows (M % 4) ----
// output ch0
for (int i = remain_outch_start; i < M; i++)
{
float* output = pC + i * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
__m256 _sum0 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01
_sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02
_sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03
va += 4;
vb += 32;
}
for (; k < K; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
va += 1;
vb += 8;
}
_mm256_storeu_ps(output, _sum0);
#else
float sum[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output[n] = sum[n];
}
#endif // __AVX__
output += 8;
}
// leftover output columns (N % 8) for the single-row tail
for (; j < N; j++)
{
float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
int k = 0;
#if __AVX__
__m128 _sum0 = _mm_set1_ps(0.f);
for (; k + 3 < K; k += 4)
{
__m128 _p0 = _mm_loadu_ps(vb);
__m128 _k0 = _mm_loadu_ps(va);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
va += 4;
vb += 4;
}
#ifdef _WIN32
float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3];
#else
float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#endif
#else
float sum0 = 0.f;
#endif // __AVX__
for (; k < K; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
/* Pack B (K x N, row-major, int8) into the layout expected by sgemm_i8:
 * full 8-column panels stored as [panel][k][8] contiguously; each leftover
 * column (N % 8) gets its own K-length strip at offset
 * (col / 8 + col % 8) * 8 * K, mirroring input_pack4_fp32. */
void input_pack4_int8(int K, int N, int8_t* pB, int8_t* pB_t, int num_thread)
{
    int num_panels = N >> 3;
    int tail_start = num_panels << 3;
    // full 8-wide panels: [ch00, ch10, ..., ch70, ch01, ch11, ch21, ...]
    #pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < num_panels; p++)
    {
        int col = p * 8;
        const int8_t* src = pB + col;
        int8_t* dst = pB_t + (col / 8) * 8 * K;
        for (int k = 0; k < K; k++)
        {
            for (int t = 0; t < 8; t++)
                dst[t] = src[t];
            dst += 8;
            src += N;
        }
    }
    // leftover columns, one K-length strip each: [ch00, ch01, ch02, ...]
    #pragma omp parallel for num_threads(num_thread)
    for (int col = tail_start; col < N; col++)
    {
        const int8_t* src = pB + col;
        int8_t* dst = pB_t + (col / 8 + col % 8) * 8 * K;
        for (int k = 0; k < K; k++)
        {
            *dst++ = *src;
            src += N;
        }
    }
}
static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = M >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 8;
int32_t* output0 = pC + (i)*N;
int32_t* output1 = pC + (i + 1) * N;
int32_t* output2 = pC + (i + 2) * N;
int32_t* output3 = pC + (i + 3) * N;
int32_t* output4 = pC + (i + 4) * N;
int32_t* output5 = pC + (i + 5) * N;
int32_t* output6 = pC + (i + 6) * N;
int32_t* output7 = pC + (i + 7) * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
int8_t* va = pA_t + (i / 8) * 8 * K;
int8_t* vb = pB_t + (j / 8) * 8 * K;
#if 0 //__AVX__
__m256i _sum0 = _mm256_set1_epi32(0);
__m256i _sum1 = _mm256_set1_epi32(0);
__m256i _sum2 = _mm256_set1_epi32(0);
__m256i _sum3 = _mm256_set1_epi32(0);
__m256i _sum4 = _mm256_set1_epi32(0);
__m256i _sum5 = _mm256_set1_epi32(0);
__m256i _sum6 = _mm256_set1_epi32(0);
__m256i _sum7 = _mm256_set1_epi32(0);
int k = 0;
for (; k + 3 < K; k = k + 4) {
// k0
__m256i _va0 = _mm256_set1_epi32(*va);
__m256i _va1 = _mm256_set1_epi32(*(va + 1));
__m256i _va2 = _mm256_set1_epi32(*(va + 2));
__m256i _va3 = _mm256_set1_epi32(*(va + 3));
__m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
__m256i _vb1 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8)));
__m256i _vb2 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16)));
__m256i _vb3 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24)));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3);
_va0 = _mm256_set1_epi32(*(va + 4));
_va1 = _mm256_set1_epi32(*(va + 5));
_va2 = _mm256_set1_epi32(*(va + 6));
_va3 = _mm256_set1_epi32(*(va + 7));
_sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4);
_sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5);
_sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6);
_sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7);
va += 8;
// k1
_va0 = _mm256_set1_epi32(*va);
_va1 = _mm256_set1_epi32(*(va + 1));
_va2 = _mm256_set1_epi32(*(va + 2));
_va3 = _mm256_set1_epi32(*(va + 3));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3);
_va0 = _mm256_set1_epi32(*(va + 4));
_va1 = _mm256_set1_epi32(*(va + 5));
_va2 = _mm256_set1_epi32(*(va + 6));
_va3 = _mm256_set1_epi32(*(va + 7));
_sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4);
_sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5);
_sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6);
_sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7);
va += 8;
// k2
_va0 = _mm256_set1_epi32(*va);
_va1 = _mm256_set1_epi32(*(va + 1));
_va2 = _mm256_set1_epi32(*(va + 2));
_va3 = _mm256_set1_epi32(*(va + 3));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3);
_va0 = _mm256_set1_epi32(*(va + 4));
_va1 = _mm256_set1_epi32(*(va + 5));
_va2 = _mm256_set1_epi32(*(va + 6));
_va3 = _mm256_set1_epi32(*(va + 7));
_sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4);
_sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5);
_sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6);
_sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7);
va += 8;
// k3
_va0 = _mm256_set1_epi32(*va);
_va1 = _mm256_set1_epi32(*(va + 1));
_va2 = _mm256_set1_epi32(*(va + 2));
_va3 = _mm256_set1_epi32(*(va + 3));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3);
_va0 = _mm256_set1_epi32(*(va + 4));
_va1 = _mm256_set1_epi32(*(va + 5));
_va2 = _mm256_set1_epi32(*(va + 6));
_va3 = _mm256_set1_epi32(*(va + 7));
_sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4);
_sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5);
_sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6);
_sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7);
va += 8;
vb += 32;
}
for (; k < K; k++) {
__m256i _va0 = _mm256_set1_epi32(*va);
__m256i _va1 = _mm256_set1_epi32(*(va + 1));
__m256i _va2 = _mm256_set1_epi32(*(va + 2));
__m256i _va3 = _mm256_set1_epi32(*(va + 3));
__m256i _va4 = _mm256_set1_epi32(*(va + 4));
__m256i _va5 = _mm256_set1_epi32(*(va + 5));
__m256i _va6 = _mm256_set1_epi32(*(va + 6));
__m256i _va7 = _mm256_set1_epi32(*(va + 7));
__m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3);
_sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4);
_sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5);
_sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6);
_sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7);
va += 8;
vb += 8;
}
_mm256_storeu_si256((__m256i* )output0, _sum0);
_mm256_storeu_si256((__m256i* )output1, _sum1);
_mm256_storeu_si256((__m256i* )output2, _sum2);
_mm256_storeu_si256((__m256i* )output3, _sum3);
_mm256_storeu_si256((__m256i* )output4, _sum4);
_mm256_storeu_si256((__m256i* )output5, _sum5);
_mm256_storeu_si256((__m256i* )output6, _sum6);
_mm256_storeu_si256((__m256i* )output7, _sum7);
#else
int32_t sum0[8] = {0};
int32_t sum1[8] = {0};
int32_t sum2[8] = {0};
int32_t sum3[8] = {0};
int32_t sum4[8] = {0};
int32_t sum5[8] = {0};
int32_t sum6[8] = {0};
int32_t sum7[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
sum4[n] += va[4] * vb[n];
sum5[n] += va[5] * vb[n];
sum6[n] += va[6] * vb[n];
sum7[n] += va[7] * vb[n];
}
va += 8;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
output4[n] = sum4[n];
output5[n] = sum5[n];
output6[n] = sum6[n];
output7[n] = sum7[n];
}
#endif
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
output4 += 8;
output5 += 8;
output6 += 8;
output7 += 8;
}
for (; j < N; j++)
{
int8_t* va = pA_t + (i / 8) * 8 * K;
int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if 0 //__AVX__
__m256i _sum0_7 = _mm256_set1_epi32(0);
__m256i _sum0 = _mm256_set1_epi32(0);
__m256i _sum1 = _mm256_set1_epi32(0);
__m256i _sum2 = _mm256_set1_epi32(0);
__m256i _sum3 = _mm256_set1_epi32(0);
int k = 0;
for (; k + 3 < K; k = k + 4) {
__m256i _vb0 = _mm256_set1_epi32(*vb);
__m256i _vb1 = _mm256_set1_epi32(*(vb + 1));
__m256i _vb2 = _mm256_set1_epi32(*(vb + 2));
__m256i _vb3 = _mm256_set1_epi32(*(vb + 3));
__m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va));
__m256i _va1 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8)));
__m256i _va2 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 16)));
__m256i _va3 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 24)));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3);
va += 32;
vb += 4;
}
_sum0 = _mm256_add_epi32(_sum0, _sum1);
_sum2 = _mm256_add_epi32(_sum2, _sum3);
_sum0_7 = _mm256_add_epi32(_sum0_7, _sum0);
_sum0_7 = _mm256_add_epi32(_sum0_7, _sum2);
for (; k < K; k++) {
__m256i _vb0 = _mm256_set1_epi32(*vb);
__m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va));
_sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7);
va += 8;
vb += 1;
}
int32_t output_sum0_7[8] = {0};
_mm256_storeu_si256((__m256i* )output_sum0_7, _sum0_7);
output0[0] = output_sum0_7[0];
output1[0] = output_sum0_7[1];
output2[0] = output_sum0_7[2];
output3[0] = output_sum0_7[3];
output4[0] = output_sum0_7[4];
output5[0] = output_sum0_7[5];
output6[0] = output_sum0_7[6];
output7[0] = output_sum0_7[7];
#else
int32_t sum0 = 0;
int32_t sum1 = 0;
int32_t sum2 = 0;
int32_t sum3 = 0;
int32_t sum4 = 0;
int32_t sum5 = 0;
int32_t sum6 = 0;
int32_t sum7 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
sum4 += va[4] * vb[0];
sum5 += va[5] * vb[0];
sum6 += va[6] * vb[0];
sum7 += va[7] * vb[0];
va += 8;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output4[0] = sum4;
output5[0] = sum5;
output6[0] = sum6;
output7[0] = sum7;
#endif
output0++;
output1++;
output2++;
output3++;
output4++;
output5++;
output6++;
output7++;
}
}
nn_outch = (M - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int i = remain_outch_start + pp * 4;
int32_t* output0 = pC + (i)*N;
int32_t* output1 = pC + (i + 1) * N;
int32_t* output2 = pC + (i + 2) * N;
int32_t* output3 = pC + (i + 3) * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
int8_t* vb = pB_t + (j / 8) * 8 * K;
#if 0 //__AVX__
__m256i _sum0 = _mm256_set1_epi32(0);
__m256i _sum1 = _mm256_set1_epi32(0);
__m256i _sum2 = _mm256_set1_epi32(0);
__m256i _sum3 = _mm256_set1_epi32(0);
int k = 0;
for (; k + 3 < K; k = K + 4) {
// k0
__m256i _va0 = _mm256_set1_epi32(*va);
__m256i _va1 = _mm256_set1_epi32(*(va + 1));
__m256i _va2 = _mm256_set1_epi32(*(va + 2));
__m256i _va3 = _mm256_set1_epi32(*(va + 3));
__m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
__m256i _vb1 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8)));
__m256i _vb2 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16)));
__m256i _vb3 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24)));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3);
va += 4;
// k1
_va0 = _mm256_set1_epi32(*va);
_va1 = _mm256_set1_epi32(*(va + 1));
_va2 = _mm256_set1_epi32(*(va + 2));
_va3 = _mm256_set1_epi32(*(va + 3));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3);
va += 4;
// k2
_va0 = _mm256_set1_epi32(*va);
_va1 = _mm256_set1_epi32(*(va + 1));
_va2 = _mm256_set1_epi32(*(va + 2));
_va3 = _mm256_set1_epi32(*(va + 3));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3);
va += 4;
// k3
_va0 = _mm256_set1_epi32(*va);
_va1 = _mm256_set1_epi32(*(va + 1));
_va2 = _mm256_set1_epi32(*(va + 2));
_va3 = _mm256_set1_epi32(*(va + 3));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3);
va += 4;
vb += 32;
}
for (; k < K; k++) {
__m256i _va0 = _mm256_set1_epi32(*va);
__m256i _va1 = _mm256_set1_epi32(*(va + 1));
__m256i _va2 = _mm256_set1_epi32(*(va + 2));
__m256i _va3 = _mm256_set1_epi32(*(va + 3));
__m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3);
va += 4;
vb += 8;
}
_mm256_storeu_si256((__m256i* )output0, _sum0);
_mm256_storeu_si256((__m256i* )output1, _sum1);
_mm256_storeu_si256((__m256i* )output2, _sum2);
_mm256_storeu_si256((__m256i* )output3, _sum3);
#else
int32_t sum0[8] = {0};
int32_t sum1[8] = {0};
int32_t sum2[8] = {0};
int32_t sum3[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
#endif
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
}
for (; j < N; j++)
{
int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if 0 //__AVX__
__m256i _sum0_3 = _mm256_set1_epi32(0);
__m256i _sum0 = _mm256_set1_epi32(0);
__m256i _sum1 = _mm256_set1_epi32(0);
__m256i _sum2 = _mm256_set1_epi32(0);
__m256i _sum3 = _mm256_set1_epi32(0);
int k=0;
for (; k + 3 < K; k = k + 4)
{
__m256i _vb0 = _mm256_set1_epi32(*vb);
__m256i _vb1 = _mm256_set1_epi32(*(vb + 1));
__m256i _vb2 = _mm256_set1_epi32(*(vb + 2));
__m256i _vb3 = _mm256_set1_epi32(*(vb + 3));
__m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va));
__m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 4)));
__m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8)));
__m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 12)));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0);
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1);
_sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2);
_sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3);
va+=16;
vb+=4;
}
_sum0 = _mm256_add_epi32(_sum0, _sum1);
_sum2 = _mm256_add_epi32(_sum2, _sum3);
_sum0_3 = _mm256_add_epi32(_sum0_3, _sum0);
_sum0_3 = _mm256_add_epi32(_sum0_3, _sum2);
for (; k < K; k++)
{
__m256i _vb0 = _mm256_set1_epi32(*vb);
__m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va));
_sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3);
va += 4;
vb += 1;
}
//drop last 4 value
int32_t output_sum0_3[4] = {0};
_mm256_storeu_si256((__m256i* )output_sum0_3, _sum0_3);
output0[0] = output_sum0_3[0];
output1[0] = output_sum0_3[1];
output2[0] = output_sum0_3[2];
output3[0] = output_sum0_3[3];
#else
int32_t sum0 = 0;
int32_t sum1 = 0;
int32_t sum2 = 0;
int32_t sum3 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif
output0++;
output1++;
output2++;
output3++;
}
}
remain_outch_start += nn_outch << 2;
// output ch0
for (int i = remain_outch_start; i < M; i++)
{
int32_t* output = pC + i * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
int8_t* vb = pB_t + (j / 8) * 8 * K;
#if 0 //__AVX__
__m256i _sum0 = _mm256_set1_epi32(0);
int k = 0;
for (; k + 3 < K; k = k + 4) {
__m256i _va0 = _mm256_set1_epi32(*va);
__m256i _va1 = _mm256_set1_epi32(*(va + 1));
__m256i _va2 = _mm256_set1_epi32(*(va + 2));
__m256i _va3 = _mm256_set1_epi32(*(va + 3));
__m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
__m256i _vb1 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8)));
__m256i _vb2 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16)));
__m256i _vb3 =
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24)));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0);
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0);
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0);
va += 4;
vb += 32;
}
for (; k < K; k++) {
__m256i _va0 = _mm256_set1_epi32(*va);
__m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
_sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
va += 1;
vb += 8;
}
_mm256_storeu_si256((__m256i* )output, _sum0);
#else
int32_t sum[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output[n] = sum[n];
}
#endif
output += 8;
}
for (; j < N; j++)
{
int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
int k = 0;
int32_t sum0 = 0.f;
for (; k < K; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
static void sgemm_fp32(struct tensor* input, struct tensor* filter, struct tensor* bias,
                       struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    /* Per-group fp32 convolution via GEMM: output = interleaved_filter x packed_im2col,
       then per-channel bias add and the configured activation, written in place
       into the output tensor slice for batch image n / group `group`. */
    const int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    const int outchan_g = param->output_channel / param->group;
    const int out_h = output->dims[2];
    const int out_w = output->dims[3];
    const int out_xy = out_h * out_w;
    const int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    /* interleaved kernel block for this group */
    float* filter_sgemm = (float*)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    /* packed im2col input */
    float* input_sgemm_pack4 = (float*)priv_info->im2col_buffer_pack4;
    /* destination slice inside the output tensor */
    float* output_sgemm = (float*)output->data + n * out_image_size + outchan_g * group * out_xy;

    float* bias_fp32 = bias ? (float*)bias->data + outchan_g * group : NULL;

    sgemm_fp(outchan_g, out_xy, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);

    /* add per-output-channel bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            float b = bias_fp32[i];
            float* row = output_sgemm + i * out_xy;
            for (int j = 0; j < out_xy; j++)
            {
                row[j] += b;
            }
        }
    }

    /* activation == 0 -> ReLU */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            float* row = output_sgemm + i * out_xy;
            for (int j = 0; j < out_xy; j++)
            {
                if (row[j] < 0)
                    row[j] = 0;
            }
        }
    }

    /* activation > 0 -> ReLU6 (clamp to [0, 6]) */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            float* row = output_sgemm + i * out_xy;
            for (int j = 0; j < out_xy; j++)
            {
                if (row[j] < 0)
                    row[j] = 0;
                if (row[j] > 6)
                    row[j] = 6;
            }
        }
    }
}
static void sgemm_uint8(struct tensor* input, struct tensor* filter, struct tensor* bias,
                        struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                        int group, int num_thread)
{
    /* uint8 convolution simulated in fp32: run the fp32 GEMM into a scratch
       buffer, apply bias (dequantized with input*filter scale) and activation,
       then requantize the result to uint8 with the output scale/zero point. */
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    float* interleave_fp32 = (float*)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* im2col_pack4_fp32 = (float*)priv_info->im2col_buffer_pack4;
    uint8_t* output_uint8 = (uint8_t*)output->data + n * out_image_size + outchan_g * group * out_h * out_w;

    int* bias_int32 = NULL;
    float bias_scale = 0.f;
    if (bias)
    {
        bias_int32 = (int*)bias->data + outchan_g * group;
        /* int32 bias is stored in (input_scale * filter_scale) units */
        bias_scale = input->scale * filter->scale;
    }

    float* filter_sgemm = interleave_fp32;
    float* input_sgemm_pack4 = im2col_pack4_fp32;

    /* fp32 scratch for the GEMM result before requantization */
    float* output_sgemm = (float*)sys_malloc((unsigned long)outchan_g * out_h * out_w * sizeof(float));
    if (output_sgemm == NULL)
    {
        /* allocation failed: leave the output untouched rather than crash */
        return;
    }

    sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);

    /* process bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_sgemm[output_off] += (float)bias_int32[i] * bias_scale;
            }
        }
    }

    /* process activation relu (activation == 0) */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
            }
        }
    }

    /* process activation relu6 (activation > 0) */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
                if (output_sgemm[output_off] > 6)
                    output_sgemm[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to uint8, clamped to [0, 255] */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int udata = (int)(round(output_sgemm[output_off] / output->scale) + output->zero_point);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[output_off] = udata;
        }
    }

    sys_free(output_sgemm);
}
static void sgemm_int8(struct tensor* input, struct tensor* filter, struct tensor* bias,
                       struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    /* int8 convolution: int8 GEMM into an int32 scratch buffer, then bias add
       and dequantization to fp32 (per-channel kernel scales), activation, and
       requantization to int8 with the output scale. */
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    int8_t* interleave_int8 = (int8_t*)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    int8_t* im2col_pack4_int8 = (int8_t*)priv_info->im2col_buffer_pack4;
    int8_t* output_int8 = (int8_t*)output->data + n * out_image_size + outchan_g * group * out_h * out_w;

    int32_t* bias_int32 = NULL;
    if (bias)
        bias_int32 = (int*)bias->data + outchan_g * group;

    float input_scale = input->scale;
    float* kernel_scales = filter->scale_list;
    float output_scale = output->scale;

    int8_t* filter_sgemm = interleave_int8;
    int8_t* input_sgemm_pack4 = im2col_pack4_int8;

    /* scratch buffers: raw int32 GEMM result and its fp32 dequantized form */
    int32_t* output_sgemm_int32 = (int32_t*)sys_malloc((unsigned long)outchan_g * out_h * out_w * sizeof(int32_t));
    float* output_sgemm_fp32 = (float*)sys_malloc((unsigned long)outchan_g * out_h * out_w * sizeof(float));
    if (output_sgemm_int32 == NULL || output_sgemm_fp32 == NULL)
    {
        /* allocation failed: release whichever buffer succeeded and bail out */
        if (output_sgemm_int32)
            sys_free(output_sgemm_int32);
        if (output_sgemm_fp32)
            sys_free(output_sgemm_fp32);
        return;
    }

    sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread);

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            if (bias)
                output_sgemm_fp32[output_off] = (float)(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_sgemm_fp32[output_off] = (float)output_sgemm_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu (activation == 0) */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 (activation > 0) */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
                if (output_sgemm_fp32[output_off] > 6)
                    output_sgemm_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8, symmetric clamp to [-127, 127].
       (omp pragma moved to the outer loop, consistent with the loops above;
       the original parallelized only the inner loop per channel) */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int32_t data_i32 = (int32_t)(round(output_sgemm_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_sgemm_int32);
    sys_free(output_sgemm_fp32);
}
/* check whether the conv should use the winograd implementation */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    /* Winograd path is only worthwhile for plain 3x3 stride-1 dilation-1
       single-group convolutions on inputs larger than 10x10, with channel
       counts of at least 16 and output channels a multiple of 16. */
    if (in_h <= 10 && in_w <= 10)
        return 0;

    int is_3x3_s1_d1 = (param->kernel_h == 3) && (param->kernel_w == 3)
                       && (param->stride_h == 1) && (param->stride_w == 1)
                       && (param->dilation_h == 1) && (param->dilation_w == 1);
    if (param->group != 1 || !is_3x3_s1_d1)
        return 0;

    if (param->input_channel < 16 || param->output_channel < 16 || (param->output_channel % 16) != 0)
        return 0;

    return 1;
}
int conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param)
{
    /* Bytes needed for the im2col buffer of a single group: one column of
       kernel_size elements per output pixel. */
    int input_chan = param->input_channel / param->group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];

    /* uint8 inference is simulated with fp32, so reserve 4 bytes per element */
    int elem_size = (input->data_type == TENGINE_DT_UINT8) ? 4 : input->elem_size;

    return elem_size * output_xy * kernel_size;
}
int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param)
{
    /* Bytes for the packed im2col buffer: N columns stored as N/8 panels of
       8 columns plus N%8 single columns, each region padded to 8*K elements
       (mirrors the (j / 8 + j % 8) panel indexing used by the GEMM kernels). */
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];

    /* uint8 inference is simulated with fp32 */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;

    return elem_size * (8 * K * (N / 8 + N % 8));
}
int conv_hcl_get_interleave_pack4_size(int M, int K, struct tensor* filter)
{
    /* Bytes for the interleaved kernel buffer: M rows packed as 8-row panels,
       then 4-row panels, then single rows, each region padded to 8*K elements. */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size; /* uint8 simulated in fp32 */
    int panels = M / 8 + (M % 8) / 4 + M % 4;
    return 8 * K * panels * elem_size;
}
void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info)
{
    /* Repack the row-major M x K interleave_buffer into panel layout:
       8-row panels first, then one optional 4-row panel, then single rows.
       Inside a panel, elements are stored column-by-column (all panel rows
       for k=0, then k=1, ...), which is the order the GEMM kernels consume. */
    float* pA = (float*)priv_info->interleave_buffer;
    float* pA_t = (float*)priv_info->interleave_buffer_pack4;

    int p = 0;

    /* panels of 8 rows */
    for (; p + 7 < M; p += 8)
    {
        float* dst = pA_t + (p / 8) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 8; r++)
                *dst++ = pA[(p + r) * K + q];
        }
    }

    /* one panel of 4 rows, if 4..7 rows remain */
    for (; p + 3 < M; p += 4)
    {
        float* dst = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 4; r++)
                *dst++ = pA[(p + r) * K + q];
        }
    }

    /* leftover single rows */
    for (; p < M; p++)
    {
        float* dst = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;
        for (int q = 0; q < K; q++)
            *dst++ = pA[p * K + q];
    }
}
void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info)
{
    /* int8 variant of conv_hcl_interleave_pack4_fp32: repack the row-major
       M x K interleave_buffer into 8-row panels, then an optional 4-row
       panel, then single rows, storing each panel column-by-column. */
    int8_t* pA = (int8_t*)priv_info->interleave_buffer;
    int8_t* pA_t = (int8_t*)priv_info->interleave_buffer_pack4;

    int p = 0;

    /* panels of 8 rows */
    for (; p + 7 < M; p += 8)
    {
        int8_t* dst = pA_t + (p / 8) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 8; r++)
                *dst++ = pA[(p + r) * K + q];
        }
    }

    /* one panel of 4 rows, if 4..7 rows remain */
    for (; p + 3 < M; p += 4)
    {
        int8_t* dst = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 4; r++)
                *dst++ = pA[(p + r) * K + q];
        }
    }

    /* leftover single rows */
    for (; p < M; p++)
    {
        int8_t* dst = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;
        for (int q = 0; q < K; q++)
            *dst++ = pA[p * K + q];
    }
}
int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    /* Prepare all per-layer buffers for the im2col+GEMM convolution:
       im2col scratch, packed im2col scratch, the interleaved kernel, and
       optionally the pack4-interleaved kernel.  Returns 0 on success. */
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    /* check winograd implement, only for conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            /* winograd path manages its own buffers; nothing below applies */
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }
    /* im2col scratch: allocated only when the caller did not supply one */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size); /* NOTE(review): result not checked for NULL */
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }
    /* packed im2col scratch: allocated only when the caller did not supply one */
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size); /* NOTE(review): result not checked for NULL */
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }
    /* interleaved (reordered) kernel buffer */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size); /* NOTE(review): result not checked for NULL */
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }
    /* reorder the filter weights into interleave_buffer
       (uint8 inference is simulated with fp32, hence the separate routine) */
    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);
    /* NOTE(review): despite the name, this flag gates *allocating* the pack4
       interleave buffer here (and repacking into it); when it is clear, the
       pack4 pointer simply aliases interleave_buffer below — confirm the
       intended meaning of external_interleave_pack4_mem against callers. */
    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size); /* NOTE(review): result not checked for NULL */
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;
        /* repack in the element type used by the GEMM (uint8 runs as fp32) */
        if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8)
            conv_hcl_interleave_pack4_fp32(M, K, priv_info);
        else
            conv_hcl_interleave_pack4_int8(M, K, priv_info);
        /* the plain interleave buffer is no longer needed once repacked */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    else
    {
        /* no repacking: the pack4 pointer aliases the plain interleave buffer */
        priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer;
        priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size;
    }
    return 0;
}
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    /* Release every buffer conv_hcl_prerun allocated; externally supplied
       buffers (external_* flags set by the conv_hcl_set_* helpers) are left
       untouched.  Returns 0. */
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }
    /* NOTE(review): this condition tests interleave_buffer but frees
       interleave_buffer_pack4 — looks inconsistent with its own guard.
       After a prerun with external_interleave_pack4_mem set, interleave_buffer
       is already NULL, so this branch appears dead; confirm intent.  (Double
       free with the block below is avoided only because the pointer is
       NULLed here.) */
    if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }
    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }
    /* NOTE(review): conv_hcl_run may alias im2col_buffer_pack4 to
       im2col_buffer when external_interleave_pack4_mem is clear; if both
       im2col buffers were internally allocated, the free above plus this one
       could hit the same pointer — verify the flag combinations. */
    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }
    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }
    return 0;
}
int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                 struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    /* Run one convolution layer: winograd fast path if enabled at prerun,
       otherwise im2col + pack + GEMM for every batch image and every group,
       dispatched on the tensor's data type.  Returns 0 on success, -1 on an
       unsupported data type. */
    int group = param->group;
    int type = input_tensor->data_type;

    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread,
                                 cpu_affinity);
    }

    /* these are layer constants; hoisted out of the batch/group loops */
    int batch = input_tensor->dims[0];
    int K = filter_tensor->elem_num / filter_tensor->dims[0];
    int N = output_tensor->dims[2] * output_tensor->dims[3];

    for (int i = 0; i < batch; i++)
    {
        for (int j = 0; j < group; j++)
        {
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);

            void* im2col_buffer = priv_info->im2col_buffer;
            if (priv_info->external_interleave_pack4_mem)
            {
                /* pack the im2col columns into panel layout for the GEMM
                   (uint8 inference is simulated with fp32) */
                if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8)
                    input_pack4_fp32(K, N, (float*)im2col_buffer, (float*)priv_info->im2col_buffer_pack4, num_thread);
                else
                    input_pack4_int8(K, N, (int8_t*)im2col_buffer, (int8_t*)priv_info->im2col_buffer_pack4, num_thread);
            }
            else
            {
                /* no packing: the GEMM reads the raw im2col buffer.
                   NOTE(review): this aliases im2col_buffer_pack4 to
                   im2col_buffer; confirm conv_hcl_postrun cannot free both. */
                priv_info->im2col_buffer_pack4 = im2col_buffer;
            }

            switch (type)
            {
            case TENGINE_DT_FP32:
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
                break;
            case TENGINE_DT_UINT8:
                sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
                break;
            case TENGINE_DT_INT8:
                sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
                break;
            default:
                TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);
                return -1;
            }
        }
    }
    return 0;
}
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    /* Adopt a caller-provided im2col buffer; prerun will then skip allocating
       one and postrun will not free it.  Caller retains ownership. */
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    /* Adopt a caller-provided packed-im2col buffer; prerun will then skip
       allocating one and postrun will not free it.  Caller retains ownership. */
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    priv_info->external_im2col_pack4_mem = 1;
    return 0;
}
|
GB_binop__ldexp_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ldexp_fp64
// A.*B function (eWiseMult): GB_AemultB__ldexp_fp64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__ldexp_fp64
// C+=b function (dense accum): GB_Cdense_accumb__ldexp_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ldexp_fp64
// C=scalar+B GB_bind1st__ldexp_fp64
// C=scalar+B' GB_bind1st_tran__ldexp_fp64
// C=A+scalar GB_bind2nd__ldexp_fp64
// C=A'+scalar GB_bind2nd_tran__ldexp_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ldexp (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ldexp (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LDEXP || GxB_NO_FP64 || GxB_NO_LDEXP_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled: LDEXP is not one of the supported operators for this kernel, so
// the code generator emits the "(none)" placeholder and compiles none of it.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Computes C = A+B for the LDEXP_FP64 operator when C, A, and B are all
// dense.  The loop itself lives in the included template, specialized via
// the GB_* macros defined at the top of this file.
GrB_Info GB_Cdense_ewise3_noaccum__ldexp_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at configure time
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Accumulates the sparse matrix B into the dense matrix C using the LDEXP
// operator.  B has been sliced into tasks (kfirst/klast/pstart arrays) by
// the caller; the included template performs the actual work.
GrB_Info GB_Cdense_accumB__ldexp_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulates the scalar b (passed through the untyped pointer p_bwork, of
// type double) into every entry of the dense matrix C, via the included
// template.  Single exit point after the template, matching the structure of
// GB_Cdense_accumB above (the former second return was unreachable).
GrB_Info GB_Cdense_accumb__ldexp_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: colscale is not generated for the LDEXP operator, so the code
// generator emits the "(none)" placeholder and none of this is compiled.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: rowscale is not generated for the LDEXP operator.  The code
// generator uses the "(none)" placeholder for disabled kernels (consistent
// with the colscale placeholder above and the header comment).
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Frees the per-matrix ek_slice workspaces (safe on NULL pointers).
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// Element-wise addition C = A+B (optionally masked by M) for LDEXP_FP64.
// The TaskList/C_to_* arrays describe the precomputed parallel schedule;
// the included template does the actual work.
GrB_Info GB_AaddB__ldexp_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces; presumably allocated inside the template if needed
// (NULL-initialized so GB_FREE_ALL is always safe) -- TODO confirm
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Element-wise multiply C = A.*B (optionally masked by M) for LDEXP_FP64,
// structured identically to GB_AaddB above but using the emult template.
GrB_Info GB_AemultB__ldexp_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces, NULL-initialized so GB_FREE_ALL is always safe
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the binary operator with the scalar bound to the first argument:
// Cx [k] = ldexp (x, Bx [k]) for every entry present in B.
// Bb is B's bitmap (entry-present indicator); Cx and Bx may be aliased.
GrB_Info GB_bind1st__ldexp_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double *Bx = (double *) Bx_input ;
const double x = (*((const double *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // process only the entries present according to the bitmap Bb
    if (GBB (Bb, k))
    {
        double bk = Bx [k] ;
        Cx [k] = ldexp (x, bk) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the binary operator with the scalar bound to the second argument:
// Cx [k] = ldexp (Ax [k], y) for every entry present in A.
// Ab is A's bitmap (entry-present indicator); Cx and Ax may be aliased.
GrB_Info GB_bind2nd__ldexp_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
const double y = (*((const double *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // process only the entries present according to the bitmap Ab
    if (GBB (Ab, k))
    {
        double ak = Ax [k] ;
        Cx [k] = ldexp (ak, y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ldexp (x, aij) ; \
}
// Computes C = op (x, A'): transpose A while applying ldexp with the scalar
// x bound to the first argument, via the GB_CAST_OP macro above and the
// included transpose template.
GrB_Info GB_bind1st_tran__ldexp_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ldexp (aij, y) ; \
}
// Computes C = op (A', y): transpose A while applying ldexp with the scalar
// y bound to the second argument, via the GB_CAST_OP macro above and the
// included transpose template.
GrB_Info GB_bind2nd_tran__ldexp_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
no_omp_cpu.c | /*
* Copyright (c) 2015 - 2022, Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <assert.h>
/* Compute the set of CPUs that the OpenMP runtime leaves idle.
 *
 * On entry, all CPUs in [0, num_cpu) are added to *no_omp.  Each OpenMP
 * thread then removes the CPU it is currently running on, so on return
 * *no_omp holds the CPUs no OpenMP thread was placed on.  The caller owns
 * no_omp and must size it for at least num_cpu CPUs. */
void no_omp_cpu(int num_cpu, cpu_set_t *no_omp)
{
    int i;
    for (i = 0; i < num_cpu; ++i) {
        CPU_SET(i, no_omp);
    }
#pragma omp parallel default(shared)
    {
        /* cpu_index is per-thread state: declare it inside the parallel
         * region so it is private rather than needlessly shared */
        int cpu_index;
#pragma omp critical
        {
            cpu_index = sched_getcpu();
            /* sched_getcpu returns -1 on error; guard both bounds */
            assert(cpu_index >= 0 && cpu_index < num_cpu);
            CPU_CLR(cpu_index, no_omp);
        } /* end pragma omp critical */
    } /* end pragma omp parallel */
}
/* Print the list of CPUs left free by the OpenMP runtime. */
int main(int argc, char **argv)
{
    int i, num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
    cpu_set_t *no_omp = CPU_ALLOC(num_cpu);
    /* CPU_ALLOC is a malloc wrapper and can return NULL; passing a NULL
     * set to CPU_SET inside no_omp_cpu would crash */
    if (no_omp == NULL) {
        fprintf(stderr, "CPU_ALLOC failed\n");
        return 1;
    }
    no_omp_cpu(num_cpu, no_omp);
    printf("Free CPU list: ");
    for (i = 0; i < num_cpu; ++i) {
        if (CPU_ISSET(i, no_omp)) {
            printf("%i ", i);
        }
    }
    printf("\n\n");
    CPU_FREE(no_omp);
    return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"
#include "functions.h"
/* ElGamal demo: set up a cryptosystem, encrypt/decrypt a message, then
 * brute-force the secret key with OpenMP threads.
 * Usage: <prog> <Nthreads>; reads the bit size n (8..31) from stdin. */
int main (int argc, char **argv) {

  // require the thread-count argument: without it argv[1] is NULL and
  // atoi(argv[1]) is undefined behavior
  if (argc < 2) {
    printf("Usage: %s <Nthreads>\n", argv[0]);
    return 1;
  }
  int Nthreads = atoi(argv[1]);
  omp_set_num_threads(Nthreads);

  //seed value for the randomizer
  double seed = clock(); //this will make your program run differently everytime
  //double seed = 0; //uncomment this and your program will behave the same everytime it's run
  srand(seed);

  //declare storage for an ElGamal cryptosytem
  unsigned int p, g, h, x;

  //begin with rank 0 getting user's input
  unsigned int n;
  printf("Enter a number of bits: "); fflush(stdout);
  // scanf returns the number of successful conversions (an int, not a
  // char); if nothing was read, n would be used uninitialized (UB)
  if (scanf("%u",&n) != 1) {
    printf("Unsupported bit size.\n");
    return 0;
  }

  //make sure the input makes sense
  if ((n<8)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars)
    printf("Unsupported bit size.\n");
    return 0;
  }
  printf("\n");

  //setup an ElGamal cryptosystem
  setupElGamal(n,&p,&g,&h,&x);

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char));
  if (message == NULL) {
    printf("Out of memory.\n");
    return 1;
  }

  //populate the string with a message (str* functions take char *)
  strcpy((char *) message, "Hello, this is the message as a string.");
  printf("Message = \"%s\"\n", message);

  unsigned int len = strlen((char *) message);
  printf("string length = %u \n", len);

  /* Q1.1 Finish this line */
  unsigned int charsPerInt = (n-1)/8;

  padString(message, charsPerInt);
  printf("Padded Message = \"%s\"\n", message);

  unsigned int Nchars = strlen((char *) message);
  unsigned int Nints = strlen((char *) message)/charsPerInt;

  //storage for message as elements of Z_p
  unsigned int *Zmessage =
      (unsigned int *) malloc(Nints*sizeof(unsigned int));

  //storage for extra encryption coefficient
  unsigned int *a =
      (unsigned int *) malloc(Nints*sizeof(unsigned int));
  if (Zmessage == NULL || a == NULL) {
    printf("Out of memory.\n");
    free(message); free(Zmessage); free(a);
    return 1;
  }

  // cast the string into an unsigned int array
  convertStringToZ(message, Nchars, Zmessage, Nints);

  //Encrypt the Zmessage with the ElGamal cyrptographic system
  ElGamalEncrypt(Zmessage,a,Nints,p,g,h);

  // opening '[' matches the closing ']' printed after the loop
  printf("The encrypted text is: [");
  for (unsigned int i=0;i<Nints;i++) {
    printf("(%u,%u) ", Zmessage[i], a[i]);
  }
  printf("]\n");

  //Decrypt the Zmessage with the ElGamal cyrptographic system
  ElGamalDecrypt(Zmessage,a,Nints,p,x);
  convertZToString(Zmessage, Nints, message, Nchars);

  printf("Decrypted Message = \"%s\"\n", message);
  printf("\n");

  //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel
  printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);

  /* Q2.3 Parallelize this loop with OpenMP */
  double startTime = omp_get_wtime();
  unsigned int val = 0;
  #pragma omp parallel for shared(val)
  for (unsigned int i=0;i<p-1;i++) {
    // access the shared early-exit flag atomically: an unsynchronized
    // read/write of val across threads is a data race
    unsigned int found;
    #pragma omp atomic read
    found = val;
    if (found != 1 && modExp(g,i+1,p)==h) {
      #pragma omp atomic write
      val = 1;
      // the exponent tested is i+1, so the recovered key is i+1
      printf("Secret key found! x = %u \n", i+1);
    }
  }
  double endTime = omp_get_wtime();

  double totalTime = endTime-startTime;
  double work = (double) p;
  double throughput = work/totalTime;
  printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);

  free(message);
  free(Zmessage);
  free(a);
  return 0;
}
|
flux_avx512.c | #include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <mathimf.h>
#include <immintrin.h>
#include "geometry.h"
#include "bench.h"
#include "phy.h"
#include "core_kernel.h"
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)
static void
_KRN_ComputeFlux(
const size_t nfnodes,
const uint32_t bsz,
const uint32_t *nfptr,
const double *f_xyz0,
const double *f_xyz1,
const double *f_xyz2,
const uint32_t *ie,
const uint32_t *part,
const uint32_t *n0,
const uint32_t *n1,
const double *x0,
const double *x1,
const double *x2,
const double *x3,
const double *q,
const size_t dofs,
const size_t snfc,
const uint32_t *snfic,
const double *xyz0,
const double *xyz1,
const double *xyz2,
const uint32_t *sn0,
const uint32_t *sn1,
const uint32_t *sn2,
const double *w0termsx,
const double *w0termsy,
const double *w0termsz,
const double *w1termsx,
const double *w1termsy,
const double *w1termsz,
double *gradx0,
double *gradx1,
double *gradx2,
double *r)
{
memset(gradx0, 0, dofs * sizeof(double));
memset(gradx1, 0, dofs * sizeof(double));
memset(gradx2, 0, dofs * sizeof(double));
memset(r, 0, dofs * sizeof(double));
/*
Calculates the gradients at the nodes using weighted least squares
This solves using Gram-Schmidt
*/
#pragma omp parallel
{
const uint32_t t = (unsigned int) omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const uint32_t idx0 = (unsigned int) bsz * node0;
const uint32_t idx1 = (unsigned int) bsz * node1;
double dq;
double termx;
double termy;
double termz;
if(part[node0] == t)
{
termx = w0termsx[i];
termy = w0termsy[i];
termz = w0termsz[i];
dq = q[idx1 + 0] - q[idx0 + 0];
gradx0[idx0 + 0] += termx * dq;
gradx1[idx0 + 0] += termy * dq;
gradx2[idx0 + 0] += termz * dq;
dq = q[idx1 + 1] - q[idx0 + 1];
gradx0[idx0 + 1] += termx * dq;
gradx1[idx0 + 1] += termy * dq;
gradx2[idx0 + 1] += termz * dq;
dq = q[idx1 + 2] - q[idx0 + 2];
gradx0[idx0 + 2] += termx * dq;
gradx1[idx0 + 2] += termy * dq;
gradx2[idx0 + 2] += termz * dq;
dq = q[idx1 + 3] - q[idx0 + 3];
gradx0[idx0 + 3] += termx * dq;
gradx1[idx0 + 3] += termy * dq;
gradx2[idx0 + 3] += termz * dq;
}
if(part[node1] == t)
{
termx = w1termsx[i];
termy = w1termsy[i];
termz = w1termsz[i];
dq = q[idx0 + 0] - q[idx1 + 0];
gradx0[idx1 + 0] += termx * dq;
gradx1[idx1 + 0] += termy * dq;
gradx2[idx1 + 0] += termz * dq;
dq = q[idx0 + 1] - q[idx1 + 1];
gradx0[idx1 + 1] += termx * dq;
gradx1[idx1 + 1] += termy * dq;
gradx2[idx1 + 1] += termz * dq;
dq = q[idx0 + 2] - q[idx1 + 2];
gradx0[idx1 + 2] += termx * dq;
gradx1[idx1 + 2] += termy * dq;
gradx2[idx1 + 2] += termz * dq;
dq = q[idx0 + 3] - q[idx1 + 3];
gradx0[idx1 + 3] += termx * dq;
gradx1[idx1 + 3] += termy * dq;
gradx2[idx1 + 3] += termz * dq;
}
}
}
/*
Calculates the fluxes on the face and performs the flux balance
*/
/* AVX512 Registers */
const __m512d _zero = _mm512_set1_pd(0);
const __m512d _pos1 = _mm512_set1_pd(1.0);
const __m512d _pos2 = _mm512_set1_pd(2.0);
const __m512d _half = _mm512_set1_pd(0.5);
const __m512d _nhalf = _mm512_set1_pd(-0.5);
const __m512d _nu95 = _mm512_set1_pd(0.95);
const __m512d _beta = _mm512_set1_pd(B);
#ifdef ARCH_SKY
//const __m512d _rbeta = _mm512_rcp14_pd(_beta);
const __m512d _rbeta = _mm512_div_pd(_pos1, _beta);
#else
const __m512d _rbeta = _mm512_rcp28_pd(_beta);
#endif
const __m256i _bsz = _mm256_set1_epi32(bsz);
const __m256i _shift1 = _mm256_set1_epi32(1);
const __m256i _shift2 = _mm256_set1_epi32(2);
const __m256i _shift3 = _mm256_set1_epi32(3);
const __m512i _ng = _mm512_set1_epi32(-1);
const __m512d _und = _mm512_undefined_pd();
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
const uint32_t lim = ie1 - ((ie1-ie0) % 8);
const __m512i _t = _mm512_set1_epi32(t);
uint32_t i;
for(i = ie0; i < lim; i+=8)
{
const __m512d _xn = _mm512_load_pd((void const *) &x0[i]);
const __m512d _yn = _mm512_load_pd((void const *) &x1[i]);
const __m512d _zn = _mm512_load_pd((void const *) &x2[i]);
const __m512d _ln = _mm512_load_pd((void const *) &x3[i]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
const __m512d _fdot = _mm512_abs_pd(_xn);
__mmask _k0;
__m512d _dot, _X1, _Y1, _Z1;
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_LT_OS);
_X1 = _mm512_mask_fnmadd_pd(_xn, _k0, _xn, _pos1);
_Y1 = _mm512_mask_fnmadd_pd(_yn, _k0, _xn, _zero);
_Z1 = _mm512_mask_fnmadd_pd(_zn, _k0, _xn, _zero);
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_GE_OS);
_X1 = _mm512_mask_fnmadd_pd(_X1, _k0, _yn, _zero);
_Y1 = _mm512_mask_fnmadd_pd(_Y1, _k0, _yn, _pos1);
_Z1 = _mm512_mask_fnmadd_pd(_Z1, _k0, _yn, _zero);
/*
Normalize the first vector
*/
__m512d _size;
_size = _mm512_mul_pd(_X1, _X1);
_size = _mm512_fmadd_pd(_Y1, _Y1, _size);
_size = _mm512_fmadd_pd(_Z1, _Z1, _size);
#ifdef ARCH_SKY
_size = _mm512_sqrt_pd(_size);
_size = _mm512_div_pd(_pos1, _size);
//_size = _mm512_rsqrt14_pd(_size);
#else
_size = _mm512_rsqrt28_pd(_size);
#endif
_X1 = _mm512_mul_pd(_X1, _size);
_Y1 = _mm512_mul_pd(_Y1, _size);
_Z1 = _mm512_mul_pd(_Z1, _size);
const __m256i _n0 = _mm256_load_si256((__m256i const *) &n0[i]);
const __m256i _n1 = _mm256_load_si256((__m256i const *) &n1[i]);
const __m512d _x00 = _mm512_i32gather_pd(_n0, &xyz0[0], 8);
const __m512d _x01 = _mm512_i32gather_pd(_n0, &xyz1[0], 8);
const __m512d _x02 = _mm512_i32gather_pd(_n0, &xyz2[0], 8);
const __m512d _x10 = _mm512_i32gather_pd(_n1, &xyz0[0], 8);
const __m512d _x11 = _mm512_i32gather_pd(_n1, &xyz1[0], 8);
const __m512d _x12 = _mm512_i32gather_pd(_n1, &xyz2[0], 8);
const __m512d _xmean = _mm512_mul_pd(_half, _mm512_add_pd(_x00, _x10));
const __m512d _ymean = _mm512_mul_pd(_half, _mm512_add_pd(_x01, _x11));
const __m512d _zmean = _mm512_mul_pd(_half, _mm512_add_pd(_x02, _x12));
/*
Take cross-product of normal and V1 to get V2
*/
const __m512d _X2 = _mm512_fmsub_pd(_yn, _Z1, _mm512_mul_pd(_zn, _Y1));
const __m512d _Y2 = _mm512_fmsub_pd(_zn, _X1, _mm512_mul_pd(_xn, _Z1));
const __m512d _Z2 = _mm512_fmsub_pd(_xn, _Y1, _mm512_mul_pd(_yn, _X1));
/*
Compute the stride indices
*/
const __m256i _idx0 = _mm256_mullo_epi32(_bsz, _n0);
const __m256i _idx1 = _mm256_mullo_epi32(_bsz, _n1);
const __m256i _idx01 = _mm256_add_epi32(_idx0, _shift1);
const __m256i _idx11 = _mm256_add_epi32(_idx1, _shift1);
const __m256i _idx02 = _mm256_add_epi32(_idx0, _shift2);
const __m256i _idx12 = _mm256_add_epi32(_idx1, _shift2);
const __m256i _idx03 = _mm256_add_epi32(_idx0, _shift3);
const __m256i _idx13 = _mm256_add_epi32(_idx1, _shift3);
/*
Get variables on "left" and "right" side of face
*/
__m512d _q;
__m512d _ubarL, _ubarR;
__m512d _rx, _ry, _rz;
__m512d _g0, _g1, _g2;
__m512d _pL, _uL, _vL, _wL;
__m512d _pR, _uR, _vR, _wR;
/* Left */
_rx = _mm512_sub_pd(_xmean, _x00);
_ry = _mm512_sub_pd(_ymean, _x01);
_rz = _mm512_sub_pd(_zmean, _x02);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx0, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx0, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx0, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx0, &q[0], 8);
_pL = _mm512_fmadd_pd(_g0, _rx, _q);
_pL = _mm512_fmadd_pd(_g1, _ry, _pL);
_pL = _mm512_fmadd_pd(_g2, _rz, _pL);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx01, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx01, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx01, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx01, &q[0], 8);
_uL = _mm512_fmadd_pd(_g0, _rx, _q);
_uL = _mm512_fmadd_pd(_g1, _ry, _uL);
_uL = _mm512_fmadd_pd(_g2, _rz, _uL);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx02, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx02, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx02, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx02, &q[0], 8);
_vL = _mm512_fmadd_pd(_g0, _rx, _q);
_vL = _mm512_fmadd_pd(_g1, _ry, _vL);
_vL = _mm512_fmadd_pd(_g2, _rz, _vL);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx03, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx03, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx03, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx03, &q[0], 8);
_wL = _mm512_fmadd_pd(_g0, _rx, _q);
_wL = _mm512_fmadd_pd(_g1, _ry, _wL);
_wL = _mm512_fmadd_pd(_g2, _rz, _wL);
_ubarL = _mm512_mul_pd(_xn, _uL);
_ubarL = _mm512_fmadd_pd(_yn, _vL, _ubarL);
_ubarL = _mm512_fmadd_pd(_zn, _wL, _ubarL);
/* Right */
_rx = _mm512_sub_pd(_xmean, _x10);
_ry = _mm512_sub_pd(_ymean, _x11);
_rz = _mm512_sub_pd(_zmean, _x12);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx1, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx1, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx1, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx1, &q[0], 8);
_pR = _mm512_fmadd_pd(_g0, _rx, _q);
_pR = _mm512_fmadd_pd(_g1, _ry, _pR);
_pR = _mm512_fmadd_pd(_g2, _rz, _pR);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx11, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx11, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx11, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx11, &q[0], 8);
_uR = _mm512_fmadd_pd(_g0, _rx, _q);
_uR = _mm512_fmadd_pd(_g1, _ry, _uR);
_uR = _mm512_fmadd_pd(_g2, _rz, _uR);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx12, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx12, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx12, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx12, &q[0], 8);
_vR = _mm512_fmadd_pd(_g0, _rx, _q);
_vR = _mm512_fmadd_pd(_g1, _ry, _vR);
_vR = _mm512_fmadd_pd(_g2, _rz, _vR);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx13, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx13, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx13, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx13, &q[0], 8);
_wR = _mm512_fmadd_pd(_g0, _rx, _q);
_wR = _mm512_fmadd_pd(_g1, _ry, _wR);
_wR = _mm512_fmadd_pd(_g2, _rz, _wR);
_ubarR = _mm512_mul_pd(_xn, _uR);
_ubarR = _mm512_fmadd_pd(_yn, _vR, _ubarR);
_ubarR = _mm512_fmadd_pd(_zn, _wR, _ubarR);
const __m512d _dp = _mm512_sub_pd(_pR, _pL);
const __m512d _du = _mm512_sub_pd(_uR, _uL);
const __m512d _dv = _mm512_sub_pd(_vR, _vL);
const __m512d _dw = _mm512_sub_pd(_wR, _wL);
/* Compute averages for velocity variables only */
const __m512d _u = _mm512_mul_pd(_half, _mm512_add_pd(_uL, _uR));
const __m512d _v = _mm512_mul_pd(_half, _mm512_add_pd(_vL, _vR));
const __m512d _w = _mm512_mul_pd(_half, _mm512_add_pd(_wL, _wR));
__m512d _ubar;
_ubar = _mm512_mul_pd(_xn, _u);
_ubar = _mm512_fmadd_pd(_yn, _v, _ubar);
_ubar = _mm512_fmadd_pd(_zn, _w, _ubar);
/* Compute Phi's */
__m512d _phi1;
_phi1 = _mm512_mul_pd(_xn, _beta);
_phi1 = _mm512_fmadd_pd(_u, _ubar, _phi1);
__m512d _phi2;
_phi2 = _mm512_mul_pd(_yn, _beta);
_phi2 = _mm512_fmadd_pd(_v, _ubar, _phi2);
__m512d _phi3;
_phi3 = _mm512_mul_pd(_zn, _beta);
_phi3 = _mm512_fmadd_pd(_w, _ubar, _phi3);
__m512d _phi4;
_phi4 = _mm512_mul_pd(_Z2, _phi2);
_phi4 = _mm512_fmsub_pd(_Y2, _phi3, _phi4);
__m512d _phi5;
_phi5 = _mm512_mul_pd(_X2, _phi3);
_phi5 = _mm512_fmsub_pd(_Z2, _phi1, _phi5);
__m512d _phi6;
_phi6 = _mm512_mul_pd(_Y2, _phi1);
_phi6 = _mm512_fmsub_pd(_X2, _phi2, _phi6);
__m512d _phi7;
_phi7 = _mm512_mul_pd(_Y1, _phi3);
_phi7 = _mm512_fmsub_pd(_Z1, _phi2, _phi7);
__m512d _phi8;
_phi8 = _mm512_mul_pd(_Z1, _phi1);
_phi8 = _mm512_fmsub_pd(_X1, _phi3, _phi8);
__m512d _phi9;
_phi9 = _mm512_mul_pd(_X1, _phi2);
_phi9 = _mm512_fmsub_pd(_Y1, _phi1, _phi9);
/*
Compute eigenvalues, eigenvectors, and strengths
*/
const __m512d _c2 = _mm512_fmadd_pd(_ubar, _ubar, _beta);
#ifdef ARCH_SKY
//const __m512d _c = _mm512_mul_pd(_mm512_rsqrt14_pd(_c2), _c2);
const __m512d _c = _mm512_sqrt_pd(_c2);
//const __m512d _c2r = _mm512_rcp14_pd(_c2);
const __m512d _c2r = _mm512_div_pd(_pos1, _c2);
#else
const __m512d _c = _mm512_mul_pd(_mm512_rsqrt28_pd(_c2), _c2);
const __m512d _c2r = _mm512_rcp28_pd(_c2);
#endif
const __m512d _bac = _mm512_add_pd(_ubar, _c);
const __m512d _bsc = _mm512_sub_pd(_ubar, _c);
/*
Components of T(inverse)
*/
__m512d _ti11;
_ti11 = _mm512_mul_pd(_u, _phi4);
_ti11 = _mm512_fmadd_pd(_v, _phi5, _ti11);
_ti11 = _mm512_fmadd_pd(_w, _phi6, _ti11);
_ti11 = _mm512_fnmadd_pd(_ti11, _rbeta, _zero);
__m512d _ti21;
_ti21 = _mm512_mul_pd(_u, _phi7);
_ti21 = _mm512_fmadd_pd(_v, _phi8, _ti21);
_ti21 = _mm512_fmadd_pd(_w, _phi9, _ti21);
_ti21 = _mm512_fnmadd_pd(_ti21, _rbeta, _zero);
__m512d _ti31;
_ti31 = _mm512_mul_pd(_half, _mm512_sub_pd(_c, _ubar));
_ti31 = _mm512_mul_pd(_ti31, _rbeta);
__m512d _ti41;
_ti41 = _mm512_mul_pd(_nhalf, _bac);
_ti41 = _mm512_mul_pd(_ti41, _rbeta);
/*
jumps (T(inverse) * dq)
*/
__m512d _dv1;
_dv1 = _mm512_mul_pd(_ti11, _dp);
_dv1 = _mm512_fmadd_pd(_phi4, _du, _dv1);
_dv1 = _mm512_fmadd_pd(_phi5, _dv, _dv1);
_dv1 = _mm512_fmadd_pd(_phi6, _dw, _dv1);
_dv1 = _mm512_mul_pd(_dv1, _c2r);
__m512d _dv2;
_dv2 = _mm512_mul_pd(_ti21, _dp);
_dv2 = _mm512_fmadd_pd(_phi7, _du, _dv2);
_dv2 = _mm512_fmadd_pd(_phi8, _dv, _dv2);
_dv2 = _mm512_fmadd_pd(_phi9, _dw, _dv2);
_dv2 = _mm512_mul_pd(_dv2, _c2r);
__m512d _dv34;
_dv34 = _mm512_mul_pd(_xn, _du);
_dv34 = _mm512_fmadd_pd(_yn, _dv, _dv34);
_dv34 = _mm512_fmadd_pd(_zn, _dw, _dv34);
__m512d _dv3;
_dv3 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti31), _dp, _dv34);
_dv3 = _mm512_mul_pd(_dv3, _mm512_mul_pd(_half, _c2r));
__m512d _dv4;
_dv4 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti41), _dp, _dv34);
_dv4 = _mm512_mul_pd(_dv4, _mm512_mul_pd(_half, _c2r));
/*
Now get elements of T
*/
const __m512d _r13 = _mm512_mul_pd(_c, _beta);
__m512d _r23;
_r23 = _mm512_mul_pd(_u, _bac);
_r23 = _mm512_fmadd_pd(_xn, _beta, _r23);
__m512d _r33;
_r33 = _mm512_mul_pd(_v, _bac);
_r33 = _mm512_fmadd_pd(_yn, _beta, _r33);
__m512d _r43;
_r43 = _mm512_mul_pd(_w, _bac);
_r43 = _mm512_fmadd_pd(_zn, _beta, _r43);
const __m512d _r14 = _mm512_fnmadd_pd(_c, _beta, _zero);
__m512d _r24;
_r24 = _mm512_mul_pd(_u, _bsc);
_r24 = _mm512_fmadd_pd(_xn, _beta, _r24);
__m512d _r34;
_r34 = _mm512_mul_pd(_v, _bsc);
_r34 = _mm512_fmadd_pd(_yn, _beta, _r34);
__m512d _r44;
_r44 = _mm512_mul_pd(_w, _bsc);
_r44 = _mm512_fmadd_pd(_zn, _beta, _r44);
/*
Calculate T* |lambda| * T(inverse)
*/
const __m512d _eig1 = _mm512_abs_pd(_ubar);
const __m512d _eig2 = _mm512_abs_pd(_bac);
const __m512d _eig3 = _mm512_abs_pd(_bsc);
__m512d _t1;
_t1 = _mm512_mul_pd(_mm512_mul_pd(_eig2, _r13), _dv3);
_t1 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r14), _dv4, _t1);
__m512d _t2;
_t2 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _X1), _dv1);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _X2), _dv2, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r23), _dv3, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r24), _dv4, _t2);
__m512d _t3;
_t3 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Y1), _dv1);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Y2), _dv2, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r33), _dv3, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r34), _dv4, _t3);
__m512d _t4;
_t4 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Z1), _dv1);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Z2), _dv2, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r43), _dv3, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r44), _dv4, _t4);
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
/* Left Side */
__m512d _fluxp1;
_fluxp1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarL);
__m512d _fluxp2;
_fluxp2 = _mm512_mul_pd(_uL, _ubarL);
_fluxp2 = _mm512_fmadd_pd(_xn, _pL, _fluxp2);
_fluxp2 = _mm512_mul_pd(_ln, _fluxp2);
__m512d _fluxp3;
_fluxp3 = _mm512_mul_pd(_vL, _ubarL);
_fluxp3 = _mm512_fmadd_pd(_yn, _pL, _fluxp3);
_fluxp3 = _mm512_mul_pd(_ln, _fluxp3);
__m512d _fluxp4;
_fluxp4 = _mm512_mul_pd(_wL, _ubarL);
_fluxp4 = _mm512_fmadd_pd(_zn, _pL, _fluxp4);
_fluxp4 = _mm512_mul_pd(_ln, _fluxp4);
/* Right Side */
__m512d _fluxm1;
_fluxm1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarR);
__m512d _fluxm2;
_fluxm2 = _mm512_mul_pd(_uR, _ubarR);
_fluxm2 = _mm512_fmadd_pd(_xn, _pR, _fluxm2);
_fluxm2 = _mm512_mul_pd(_ln, _fluxm2);
__m512d _fluxm3;
_fluxm3 = _mm512_mul_pd(_vR, _ubarR);
_fluxm3 = _mm512_fmadd_pd(_yn, _pR, _fluxm3);
_fluxm3 = _mm512_mul_pd(_ln, _fluxm3);
__m512d _fluxm4;
_fluxm4 = _mm512_mul_pd(_wR, _ubarR);
_fluxm4 = _mm512_fmadd_pd(_zn, _pR, _fluxm4);
_fluxm4 = _mm512_mul_pd(_ln, _fluxm4);
__m512d _res1;
_res1 = _mm512_fnmadd_pd(_ln, _t1, _mm512_add_pd(_fluxm1, _fluxp1));
__m512d _res2;
_res2 = _mm512_fnmadd_pd(_ln, _t2, _mm512_add_pd(_fluxm2, _fluxp2));
__m512d _res3;
_res3 = _mm512_fnmadd_pd(_ln, _t3, _mm512_add_pd(_fluxm3, _fluxp3));
__m512d _res4;
_res4 = _mm512_fnmadd_pd(_ln, _t4, _mm512_add_pd(_fluxm4, _fluxp4));
/* Update the residual */
__m512i _node, _part;
__mmask _next;
_node = _mm512_castsi256_si512(_n0);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Conflict detection instructions with multiple node update */
/* Node 0 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx0, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx0, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx01, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx01, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx02, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx02, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx03, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx03, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
_node = _mm512_castsi256_si512(_n1);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Node 1 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx1, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx1, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx11, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx11, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx12, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx12, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx13, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx13, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
}
/* Remainder loop */
for(i = lim; i < ie1; i++)
{
uint32_t node0 = n0[i];
uint32_t node1 = n1[i];
double xn = x0[i];
double yn = x1[i];
double zn = x2[i];
double ln = x3[i];
double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);
double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal and V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Get variables on "left" and "right" side of face
*/
double rx = xmean - xyz0[node0];
double ry = ymean - xyz1[node0];
double rz = zmean - xyz2[node0];
uint32_t idx0 = (unsigned int) bsz * node0;
uint32_t idx1 = (unsigned int) bsz * node1;
// P
double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
pL += gradx1[idx0 + 0] * ry;
pL += gradx2[idx0 + 0] * rz;
// Velocity u
double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
uL += gradx1[idx0 + 1] * ry;
uL += gradx2[idx0 + 1] * rz;
// Velocity v
double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
vL += gradx1[idx0 + 2] * ry;
vL += gradx2[idx0 + 2] * rz;
// Velocity w
double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
wL += gradx1[idx0 + 3] * ry;
wL += gradx2[idx0 + 3] * rz;
double ubarL = xn * uL;
ubarL += yn * vL;
ubarL += zn * wL;
rx = xmean - xyz0[node1];
ry = ymean - xyz1[node1];
rz = zmean - xyz2[node1];
// P
double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
pR += gradx1[idx1 + 0] * ry;
pR += gradx2[idx1 + 0] * rz;
// Velocity u
double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
uR += gradx1[idx1 + 1] * ry;
uR += gradx2[idx1 + 1] * rz;
// Velocity v
double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
vR += gradx1[idx1 + 2] * ry;
vR += gradx2[idx1 + 2] * rz;
// Velocity w
double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
wR += gradx1[idx1 + 3] * ry;
wR += gradx2[idx1 + 3] * rz;
double ubarR = xn * uR;
ubarR += yn * vR;
ubarR += zn * wR;
/* Compute averages */
//double p = 0.5f * (pL + pR);
double u = 0.5f * (uL + uR);
double v = 0.5f * (vL + vR);
double w = 0.5f * (wL + wR);
double ubar = xn * u;
ubar += yn * v;
ubar += zn * w;
double phi1 = xn * B;
phi1 += u * ubar;
double phi2 = yn * B;
phi2 += v * ubar;
double phi3 = zn * B;
phi3 += w * ubar;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double c2 = ubar * ubar + B;
double c = sqrt(c2);
/*
Now compute eigenvalues, eigenvectors, and strengths
*/
double eig1 = fabs(ubar);
double eig2 = fabs(ubar);
double eig3 = fabs(ubar + c);
double eig4 = fabs(ubar - c);
double dp = pR - pL;
double du = uR - uL;
double dv = vR - vL;
double dw = wR - wL;
/*
Components of T(inverse)
*/
double ti11 = u * phi4;
ti11 += v * phi5;
ti11 += w * phi6;
ti11 = -ti11 / B;
double ti21 = u * phi7;
ti21 += v * phi8;
ti21 += w * phi9;
ti21 = -ti21 / B;
double ti31 = 0.5f * (c - ubar);
ti31 /= B;
double ti41 = -0.5f * (c + ubar);
ti41 /= B;
/*
jumps (T(inverse) * dq)
*/
double dv1 = ti11 * dp;
dv1 += phi4 * du;
dv1 += phi5 * dv;
dv1 += phi6 * dw;
dv1 /= c2;
double dv2 = ti21 * dp;
dv2 += phi7 * du;
dv2 += phi8 * dv;
dv2 += phi9 * dw;
dv2 /= c2;
double dv3 = 2.f * ti31 * dp;
dv3 += xn * du;
dv3 += yn * dv;
dv3 += zn * dw;
dv3 *= 0.5f / c2;
double dv4 = 2.f * ti41 * dp;
dv4 += xn * du;
dv4 += yn * dv;
dv4 += zn * dw;
dv4 *= 0.5f / c2;
/*
Now get elements of T
*/
double r13 = c * B;
double r23 = u * (ubar + c);
r23 += xn * B;
double r33 = v * (ubar + c);
r33 += yn * B;
double r43 = w * (ubar + c);
r43 += zn * B;
double r14 = -c * B;
double r24 = u * (ubar - c);
r24 += xn * B;
double r34 = v * (ubar - c);
r34 += yn * B;
double r44 = w * (ubar - c);
r44 += zn * B;
/*
Calculate T* |lambda| * T(inverse)
*/
double t1 = eig3 * r13 * dv3 + eig4 * r14 * dv4;
double t2 = eig1 * X1 * dv1 + eig2 * X2 * dv2;
t2 += eig3 * r23 * dv3 + eig4 * r24 * dv4;
double t3 = eig1 * Y1 * dv1 + eig2 * Y2 * dv2;
t3 += eig3 * r33 * dv3 + eig4 * r34 * dv4;
double t4 = eig1 * Z1 * dv1 + eig2 * Z2 * dv2;
t4 += eig3 * r43 * dv3 + eig4 * r44 * dv4;
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
double fluxp1 = ln * B * ubarL;
double fluxp2 = ln * (uL * ubarL + xn * pL);
double fluxp3 = ln * (vL * ubarL + yn * pL);
double fluxp4 = ln * (wL * ubarL + zn * pL);
/*
Now the right side
*/
double fluxm1 = ln * B * ubarR;
double fluxm2 = ln * (uR * ubarR + xn * pR);
double fluxm3 = ln * (vR * ubarR + yn * pR);
double fluxm4 = ln * (wR * ubarR + zn * pR);
double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);
if(part[node0] == t)
{
r[idx0 + 0] = r[idx0 + 0] + res1;
r[idx0 + 1] = r[idx0 + 1] + res2;
r[idx0 + 2] = r[idx0 + 2] + res3;
r[idx0 + 3] = r[idx0 + 3] + res4;
}
if(part[node1] == t)
{
r[idx1 + 0] = r[idx1 + 0] - res1;
r[idx1 + 1] = r[idx1 + 1] - res2;
r[idx1 + 2] = r[idx1 + 2] - res3;
r[idx1 + 3] = r[idx1 + 3] - res4;
}
}
}
uint32_t i;
for(i = 0; i < snfc; i++)
{
uint32_t if0 = snfic[i];
uint32_t if1 = snfic[i+1];
uint32_t j;
#pragma omp parallel for
for(j = if0; j < if1; j++)
{
uint32_t node0 = sn0[j];
uint32_t node1 = sn1[j];
uint32_t node2 = sn2[j];
double p1 = q[bsz * node0];
double p2 = q[bsz * node1];
double p3 = q[bsz * node2];
double ax = xyz0[node1] - xyz0[node0];
double ay = xyz1[node1] - xyz1[node0];
double az = xyz2[node1] - xyz2[node0];
double bx = xyz0[node2] - xyz0[node0];
double by = xyz1[node2] - xyz1[node0];
double bz = xyz2[node2] - xyz2[node0];
/*
Normal points away from grid interior.
Magnitude is 1/3 area of surface triangle.
*/
double xn = ay * bz;
xn -= az * by;
xn *= MAG1;
double yn = ax * bz;
yn -= az * bx;
yn *= MAG0;
double zn = ax * by;
zn -= ay * bx;
zn *= MAG1;
double pa = 0.125f * (p2 + p3);
pa += 0.75f * p1;
double pb = 0.125f * (p3 + p1);
pb += 0.75f * p2;
double pc = 0.125f * (p1 + p2);
pc += 0.75f * p3;
uint32_t idx;
idx = bsz * node0;
r[idx + 1] += xn * pa;
r[idx + 2] += yn * pa;
r[idx + 3] += zn * pa;
idx = bsz * node1;
r[idx + 1] += xn * pb;
r[idx + 2] += yn * pb;
r[idx + 3] += zn * pb;
idx = bsz * node2;
r[idx + 1] += xn * pc;
r[idx + 2] += yn * pc;
r[idx + 3] += zn * pc;
}
}
/* Do the free boundaries */
#pragma omp parallel for
for(i = 0; i < nfnodes; i++)
{
uint32_t n = nfptr[i];
/*
Get normal and "other" 2 vectors. Remember that fxn,fyn and fzn
has the magnitude of the face contained in it.
*/
double xn = f_xyz0[i];
double yn = f_xyz1[i];
double zn = f_xyz2[i];
double area = xn * xn;
area += yn * yn;
area += zn * zn;
area = sqrt(area);
xn /= area;
yn /= area;
zn /= area;
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector (V1)
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal with V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Calculate elements of T and T(inverse) evaluated at free-stream
*/
double ubar0 = xn * U;
ubar0 += yn * V;
ubar0 += zn * W;
double c20 = ubar0 * ubar0 + B;
double c0 = sqrt(c20);
double phi1 = xn * B;
phi1 += U * ubar0;
double phi2 = yn * B;
phi2 += V * ubar0;
double phi3 = zn * B;
phi3 += W * ubar0;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double t13 = c0 * B;
double t23 = U * (ubar0 + c0);
t23 += xn * B;
double t33 = V * (ubar0 + c0);
t33 += yn * B;
double t43 = W * (ubar0 + c0);
t43 += zn * B;
double t14 = -c0 * B;
double t24 = U * (ubar0 - c0);
t24 += xn * B;
double t34 = V * (ubar0 - c0);
t34 += yn * B;
double t44 = W * (ubar0 - c0);
t44 += zn * B;
double ti11 = U * phi4;
ti11 += V * phi5;
ti11 += W * phi6;
ti11 = -ti11/B;
double ti21 = U * phi7;
ti21 += V * phi8;
ti21 += W * phi9;
ti21 = -ti21/B;
double ti31 = 0.5f * (c0 - ubar0);
ti31 /= B;
double ti41 = -0.5f * (c0 + ubar0);
ti41 /= B;
/*
Now, get the variables on the "inside"
*/
double pi = q[bsz * n + 0];
double ui = q[bsz * n + 1];
double vi = q[bsz * n + 2];
double wi = q[bsz * n + 3];
double un = xn * ui;
un += yn * vi;
un += zn * wi;
/*
If ubar is negative, take the reference condition from outside
*/
double pr, ur, vr, wr;
if(un > 0.f)
{
pr = pi;
ur = ui;
vr = vi;
wr = wi;
}
else
{
pr = P;
ur = U;
vr = V;
wr = W;
}
/*
Set rhs
*/
double rhs1 = ti11 * pr;
rhs1 += phi4 * ur;
rhs1 += phi5 * vr;
rhs1 += phi6 * wr;
rhs1 /= c20;
double rhs2 = ti21 * pr;
rhs2 += phi7 * ur;
rhs2 += phi8 * vr;
rhs2 += phi9 * wr;
rhs2 /= c20;
double rhs3 = 2.f * ti31 * pi;
rhs3 += xn * ui;
rhs3 += yn * vi;
rhs3 += zn * wi;
rhs3 = 0.5f * rhs3 / c20;
double rhs4 = 2.f * ti41 * P;
rhs4 += xn * U;
rhs4 += yn * V;
rhs4 += zn * W;
rhs4 = 0.5f * rhs4 / c20;
/*
Now do matrix multiplication to get values on boundary
*/
double pb = t13 * rhs3;
pb += t14 * rhs4;
double ub = X1 * rhs1;
ub += X2 * rhs2;
ub += t23 * rhs3;
ub += t24 * rhs4;
double vb = Y1 * rhs1;
vb += Y2 * rhs2;
vb += t33 * rhs3;
vb += t34 * rhs4;
double wb = Z1 * rhs1;
wb += Z2 * rhs2;
wb += t43 * rhs3;
wb += t44 * rhs4;
double ubar = xn * ub;
ubar += yn * vb;
ubar += zn * wb;
uint32_t idx = (unsigned int) bsz * n;
r[idx + 0] += area * B * ubar;
r[idx + 1] += area * (ub * ubar + xn * pb);
r[idx + 2] += area * (vb * ubar + yn * pb);
r[idx + 3] += area * (wb * ubar + zn * pb);
}
}
/*
  ComputeFlux: public entry point for the flux-residual kernel.

  Unpacks the geometry aggregate G into the flat arrays expected by
  _KRN_ComputeFlux, times the kernel with the rdbench()/fun3d_log()
  benchmarking pair, and accumulates the result into R.

  Parameters:
    g    - mesh/geometry aggregate (nodes, edges, boundary faces, weights).
    q    - solution state vector, bsz entries per node (read-only).
    grad - nodal gradients in x0/x1/x2 directions (read-only here).
    r    - residual array updated in place by the kernel.

  NOTE(review): the per-argument meanings below are inferred from the
  struct field names only — confirm against the _KRN_ComputeFlux
  prototype, which is outside this view.
*/
void
ComputeFlux(const GEOMETRY *g, const double *q, GRADIENT *grad, double *r)
{
    BENCH start_bench = rdbench();   /* start benchmark timer */
    _KRN_ComputeFlux(
        g->b->f->sz,          /* presumably number of free-boundary nodes */
        g->c->b,              /* presumably block size (bsz) */
        g->b->f->nptr,        /* free-boundary node indices */
        g->b->f->xyz->x0,     /* free-boundary face normals (x,y,z) */
        g->b->f->xyz->x1,
        g->b->f->xyz->x2,
        g->s->i,
        g->n->part,           /* node -> partition/thread mapping */
        g->e->eptr->n0,       /* edge endpoint node lists */
        g->e->eptr->n1,
        g->e->xyzn->x0,       /* edge normals and magnitude */
        g->e->xyzn->x1,
        g->e->xyzn->x2,
        g->e->xyzn->x3,
        q,
        g->c->sz,
        g->t->sz,
        g->t->i,
        g->n->xyz->x0,        /* node coordinates */
        g->n->xyz->x1,
        g->n->xyz->x2,
        g->b->fc->fptr->n0,   /* solid-boundary face triangles */
        g->b->fc->fptr->n1,
        g->b->fc->fptr->n2,
        g->e->w->w0->x0,      /* edge weight tables */
        g->e->w->w0->x1,
        g->e->w->w0->x2,
        g->e->w->w1->x0,
        g->e->w->w1->x1,
        g->e->w->w1->x2,
        grad->x0,             /* nodal gradient components */
        grad->x1,
        grad->x2,
        r                     /* residual output, accumulated in place */
    );
    fun3d_log(start_bench, KERNEL_FLUX);   /* record elapsed time for this kernel */
}
|
tree-vect-loop.c | /* Loop Vectorization
Copyright (C) 2003-2018 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com> and
Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "params.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "gimple-fold.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-if-conv.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-eh.h"
/* Loop Vectorization Pass.
This pass tries to vectorize loops.
For example, the vectorizer transforms the following simple loop:
short a[N]; short b[N]; short c[N]; int i;
for (i=0; i<N; i++){
a[i] = b[i] + c[i];
}
as if it was manually vectorized by rewriting the source code into:
typedef int __attribute__((mode(V8HI))) v8hi;
short a[N]; short b[N]; short c[N]; int i;
v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
v8hi va, vb, vc;
for (i=0; i<N/8; i++){
vb = pb[i];
vc = pc[i];
va = vb + vc;
pa[i] = va;
}
The main entry to this pass is vectorize_loops(), in which
the vectorizer applies a set of analyses on a given set of loops,
followed by the actual vectorization transformation for the loops that
had successfully passed the analysis phase.
Throughout this pass we make a distinction between two types of
data: scalars (which are represented by SSA_NAMES), and memory references
("data-refs"). These two types of data require different handling both
during analysis and transformation. The types of data-refs that the
vectorizer currently supports are ARRAY_REFS which base is an array DECL
(not a pointer), and INDIRECT_REFS through pointers; both array and pointer
accesses are required to have a simple (consecutive) access pattern.
Analysis phase:
===============
The driver for the analysis phase is vect_analyze_loop().
It applies a set of analyses, some of which rely on the scalar evolution
analyzer (scev) developed by Sebastian Pop.
During the analysis phase the vectorizer records some information
per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
loop, as well as general information about the loop as a whole, which is
recorded in a "loop_vec_info" struct attached to each loop.
Transformation phase:
=====================
The loop transformation phase scans all the stmts in the loop, and
creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
the loop that needs to be vectorized. It inserts the vector code sequence
just before the scalar stmt S, and records a pointer to the vector code
in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
attached to S). This pointer will be used for the vectorization of following
stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
otherwise, we rely on dead code elimination for removing it.
For example, say stmt S1 was vectorized into stmt VS1:
VS1: vb = px[i];
S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
S2: a = b;
To vectorize stmt S2, the vectorizer first finds the stmt that defines
the operand 'b' (S1), and gets the relevant vector def 'vb' from the
vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
resulting sequence would be:
VS1: vb = px[i];
S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
VS2: va = vb;
S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
Operands that are not SSA_NAMEs, are data-refs that appear in
load/store operations (like 'x[i]' in S1), and are handled differently.
Target modeling:
=================
Currently the only target specific information that is used is the
size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
Targets that can support different sizes of vectors, for now will need
to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
flexibility will be added in the future.
Since we only vectorize operations which vector form can be
expressed using existing tree codes, to verify that an operation is
supported, the vectorizer checks the relevant optab at the relevant
machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
the value found is CODE_FOR_nothing, then there's no target support, and
we can't vectorize the stmt.
For additional information on this project see:
http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Function vect_determine_vectorization_factor
Determine the vectorization factor (VF). VF is the number of data elements
that are operated upon in parallel in a single iteration of the vectorized
loop. For example, when vectorizing a loop that operates on 4byte elements,
on a target with vector size (VS) 16byte, the VF is set to 4, since 4
elements can fit in a single vector register.
We currently support vectorization of loops in which all types operated upon
are of the same size. Therefore this function currently sets VF according to
the size of the types operated upon, and fails if there are multiple sizes
in the loop.
VF is also the factor by which the loop iterations are strip-mined, e.g.:
original loop:
for (i=0; i<N; i++){
a[i] = b[i] + c[i];
}
vectorized loop:
for (i=0; i<N; i+=VF){
a[i:VF] = b[i:VF] + c[i:VF];
}
*/
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned nbbs = loop->num_nodes;
  /* Running maximum of the nunits demanded by any stmt examined so far;
     becomes the loop's vectorization factor.  */
  poly_uint64 vectorization_factor = 1;
  tree scalar_type = NULL_TREE;
  gphi *phi;
  tree vectype;
  stmt_vec_info stmt_info;
  unsigned i;
  HOST_WIDE_INT dummy;
  gimple *stmt, *pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool analyze_pattern_stmt = false;
  bool bool_result;
  /* Boolean-producing stmts whose vectype cannot be derived from their
     (boolean) result type; their mask vectype is fixed up from their
     operands in the loop at the end of this function.  */
  auto_vec<stmt_vec_info> mask_producers;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_determine_vectorization_factor ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      /* First walk the PHIs: a relevant/live PHI contributes the vectype
	 of its result to the VF computation.  */
      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  phi = si.phi ();
	  stmt_info = vinfo_for_stmt (phi);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }
	  gcc_assert (stmt_info);
	  if (STMT_VINFO_RELEVANT_P (stmt_info)
	      || STMT_VINFO_LIVE_P (stmt_info))
	    {
	      /* PHIs never get a vectype earlier than here.  */
	      gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
	      scalar_type = TREE_TYPE (PHI_RESULT (phi));
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "get vectype for scalar type: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
		  dump_printf (MSG_NOTE, "\n");
		}
	      vectype = get_vectype_for_scalar_type (scalar_type);
	      if (!vectype)
		{
		  /* No vector form for this scalar type on the target.  */
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "not vectorized: unsupported "
				       "data-type ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 scalar_type);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  return false;
		}
	      STMT_VINFO_VECTYPE (stmt_info) = vectype;
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
		  dump_printf (MSG_NOTE, "\n");
		}
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
		  dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
		  dump_printf (MSG_NOTE, "\n");
		}
	      vect_update_max_nunits (&vectorization_factor, vectype);
	    }
	}

      /* Then walk the ordinary stmts.  The iterator advance is manual:
	 SI only moves forward when we are done with a stmt AND with any
	 pattern stmt / pattern def sequence attached to it.  */
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si) || analyze_pattern_stmt;)
	{
	  tree vf_vectype;
	  /* When ANALYZE_PATTERN_STMT is set we are re-examining the
	     pattern stmt that replaced the original one.  */
	  if (analyze_pattern_stmt)
	    stmt = pattern_stmt;
	  else
	    stmt = gsi_stmt (si);
	  stmt_info = vinfo_for_stmt (stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	  gcc_assert (stmt_info);
	  /* Skip stmts which do not need to be vectorized.  */
	  if ((!STMT_VINFO_RELEVANT_P (stmt_info)
	       && !STMT_VINFO_LIVE_P (stmt_info))
	      || gimple_clobber_p (stmt))
	    {
	      /* The stmt itself is irrelevant, but it may have been
		 replaced by a relevant/live pattern stmt — analyze that
		 instead.  */
	      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
		{
		  stmt = pattern_stmt;
		  stmt_info = vinfo_for_stmt (pattern_stmt);
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_NOTE, vect_location,
				       "==> examining pattern statement: ");
		      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
		    }
		}
	      else
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
		  gsi_next (&si);
		  continue;
		}
	    }
	  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	    /* Relevant stmt with an attached pattern: revisit the pattern
	       stmt on the next iteration before advancing SI.  */
	    analyze_pattern_stmt = true;

	  /* If a pattern statement has def stmts, analyze them too.  */
	  if (is_pattern_stmt_p (stmt_info))
	    {
	      if (pattern_def_seq == NULL)
		{
		  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
		  pattern_def_si = gsi_start (pattern_def_seq);
		}
	      else if (!gsi_end_p (pattern_def_si))
		gsi_next (&pattern_def_si);
	      if (pattern_def_seq != NULL)
		{
		  gimple *pattern_def_stmt = NULL;
		  stmt_vec_info pattern_def_stmt_info = NULL;
		  /* Advance to the next relevant/live def stmt, if any.  */
		  while (!gsi_end_p (pattern_def_si))
		    {
		      pattern_def_stmt = gsi_stmt (pattern_def_si);
		      pattern_def_stmt_info
			= vinfo_for_stmt (pattern_def_stmt);
		      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
			  || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
			break;
		      gsi_next (&pattern_def_si);
		    }
		  if (!gsi_end_p (pattern_def_si))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "==> examining pattern def stmt: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
					    pattern_def_stmt, 0);
			}
		      /* Analyze the def stmt in place of the pattern stmt
			 this iteration.  */
		      stmt = pattern_def_stmt;
		      stmt_info = pattern_def_stmt_info;
		    }
		  else
		    {
		      /* Def sequence exhausted; fall back to normal
			 iteration.  */
		      pattern_def_si = gsi_none ();
		      analyze_pattern_stmt = false;
		    }
		}
	      else
		analyze_pattern_stmt = false;
	    }

	  if (gimple_get_lhs (stmt) == NULL_TREE
	      /* MASK_STORE has no lhs, but is ok.  */
	      && (!is_gimple_call (stmt)
		  || !gimple_call_internal_p (stmt)
		  || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
	    {
	      if (is_gimple_call (stmt))
		{
		  /* Ignore calls with no lhs.  These must be calls to
		     #pragma omp simd functions, and what vectorization factor
		     it really needs can't be determined until
		     vectorizable_simd_clone_call.  */
		  if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
		    {
		      pattern_def_seq = NULL;
		      gsi_next (&si);
		    }
		  continue;
		}
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: irregular stmt.");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
				    0);
		}
	      return false;
	    }

	  /* Pre-existing vector stmts (e.g. from user code) are not
	     handled.  */
	  if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: vector stmt in loop:");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		}
	      return false;
	    }

	  bool_result = false;
	  if (STMT_VINFO_VECTYPE (stmt_info))
	    {
	      /* The only case when a vectype had been already set is for stmts
		 that contain a dataref, or for "pattern-stmts" (stmts
		 generated by the vectorizer to represent/replace a certain
		 idiom).  */
	      gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
			  || is_pattern_stmt_p (stmt_info)
			  || !gsi_end_p (pattern_def_si));
	      vectype = STMT_VINFO_VECTYPE (stmt_info);
	    }
	  else
	    {
	      gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
	      /* For a MASK_STORE use the type of the stored value (arg 3);
		 otherwise the type of the lhs.  */
	      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
		scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
	      else
		scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
	      /* Bool ops don't participate in vectorization factor
		 computation.  For comparison use compared types to
		 compute a factor.  */
	      if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
		  && is_gimple_assign (stmt)
		  && gimple_assign_rhs_code (stmt) != COND_EXPR)
		{
		  if (STMT_VINFO_RELEVANT_P (stmt_info)
		      || STMT_VINFO_LIVE_P (stmt_info))
		    mask_producers.safe_push (stmt_info);
		  bool_result = true;
		  if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
		      == tcc_comparison
		      && !VECT_SCALAR_BOOLEAN_TYPE_P
			    (TREE_TYPE (gimple_assign_rhs1 (stmt))))
		    scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
		  else
		    {
		      /* Pure boolean operation: contributes nothing to the
			 VF; advance and move on.  */
		      if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
			{
			  pattern_def_seq = NULL;
			  gsi_next (&si);
			}
		      continue;
		    }
		}
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "get vectype for scalar type: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
		  dump_printf (MSG_NOTE, "\n");
		}
	      vectype = get_vectype_for_scalar_type (scalar_type);
	      if (!vectype)
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "not vectorized: unsupported "
				       "data-type ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 scalar_type);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  return false;
		}
	      /* Mask producers get their real vectype in the fixup loop
		 below, not here.  */
	      if (!bool_result)
		STMT_VINFO_VECTYPE (stmt_info) = vectype;
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
		  dump_printf (MSG_NOTE, "\n");
		}
	    }

	  /* Don't try to compute VF out scalar types if we stmt
	     produces boolean vector.  Use result vectype instead.  */
	  if (VECTOR_BOOLEAN_TYPE_P (vectype))
	    vf_vectype = vectype;
	  else
	    {
	      /* The vectorization factor is according to the smallest
		 scalar type (or the largest vector size, but we only
		 support one vector size per loop).  */
	      if (!bool_result)
		scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
							     &dummy);
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "get vectype for scalar type: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
		  dump_printf (MSG_NOTE, "\n");
		}
	      vf_vectype = get_vectype_for_scalar_type (scalar_type);
	    }
	  if (!vf_vectype)
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: unsupported data-type ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				     scalar_type);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      return false;
	    }

	  /* Only one vector size per loop is supported: the stmt's own
	     vectype and the VF vectype must have the same byte size.  */
	  if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
			GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: different sized vector "
				   "types in statement, ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				     vectype);
		  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				     vf_vectype);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      return false;
	    }

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
	      dump_printf (MSG_NOTE, "\n");
	    }

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
	      dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype));
	      dump_printf (MSG_NOTE, "\n");
	    }

	  vect_update_max_nunits (&vectorization_factor, vf_vectype);

	  /* Advance SI only when no pattern stmt or pattern def stmt
	     remains to be examined for the current stmt.  */
	  if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
	    {
	      pattern_def_seq = NULL;
	      gsi_next (&si);
	    }
	}
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, "\n");
    }
  if (known_le (vectorization_factor, 1U))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  /* Fixup pass: now that the VF is known, compute a mask vectype for
     each boolean-producing stmt recorded above.  */
  for (i = 0; i < mask_producers.length (); i++)
    {
      tree mask_type = NULL;
      stmt = STMT_VINFO_STMT (mask_producers[i]);
      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
	  && !VECT_SCALAR_BOOLEAN_TYPE_P
		(TREE_TYPE (gimple_assign_rhs1 (stmt))))
	{
	  /* Comparison of non-booleans: mask type follows the compared
	     operand type.  */
	  scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  mask_type = get_mask_type_for_scalar_type (scalar_type);
	  if (!mask_type)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: unsupported mask\n");
	      return false;
	    }
	}
      else
	{
	  /* Otherwise derive the mask type from the stmt's SSA uses; all
	     operands must agree on subparts and boolean-ness.  */
	  tree rhs;
	  ssa_op_iter iter;
	  gimple *def_stmt;
	  enum vect_def_type dt;
	  FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
	    {
	      if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
				       &def_stmt, &dt, &vectype))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "not vectorized: can't compute mask type "
				       "for statement, ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
					0);
		    }
		  return false;
		}
	      /* No vectype probably means external definition.
		 Allow it in case there is another operand which
		 allows to determine mask type.  */
	      if (!vectype)
		continue;
	      if (!mask_type)
		mask_type = vectype;
	      else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
				 TYPE_VECTOR_SUBPARTS (vectype)))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "not vectorized: different sized masks "
				       "types in statement, ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 mask_type);
		      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 vectype);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  return false;
		}
	      else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
		       != VECTOR_BOOLEAN_TYPE_P (vectype))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "not vectorized: mixed mask and "
				       "nonmask vector types in statement, ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 mask_type);
		      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 vectype);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  return false;
		}
	    }
	  /* We may compare boolean value loaded as vector of integers.
	     Fix mask_type in such case.  */
	  if (mask_type
	      && !VECTOR_BOOLEAN_TYPE_P (mask_type)
	      && gimple_code (stmt) == GIMPLE_ASSIGN
	      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
	    mask_type = build_same_sized_truth_vector_type (mask_type);
	}

      /* No mask_type should mean loop invariant predicate.
	 This is probably a subject for optimization in
	 if-conversion.  */
      if (!mask_type)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "not vectorized: can't compute mask type "
			       "for statement, ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
				0);
	    }
	  return false;
	}
      STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type;
    }

  return true;
}
/* Function vect_is_simple_iv_evolution.
FORNOW: A simple evolution of an induction variables in the loop is
considered a polynomial evolution. */
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
tree * step)
{
tree init_expr;
tree step_expr;
tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
basic_block bb;
/* When there is no evolution in this loop, the evolution function
is not "simple". */
if (evolution_part == NULL_TREE)
return false;
/* When the evolution is a polynomial of degree >= 2
the evolution function is not "simple". */
if (tree_is_chrec (evolution_part))
return false;
step_expr = evolution_part;
init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "step: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
dump_printf (MSG_NOTE, ", init: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
dump_printf (MSG_NOTE, "\n");
}
*init = init_expr;
*step = step_expr;
if (TREE_CODE (step_expr) != INTEGER_CST
&& (TREE_CODE (step_expr) != SSA_NAME
|| ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
&& flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
|| (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
&& (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
|| !flag_associative_math)))
&& (TREE_CODE (step_expr) != REAL_CST
|| !flag_associative_math))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step unknown.\n");
return false;
}
return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).

   Each loop-header PHI of a scalar variable gets classified as an
   induction, a reduction, a double reduction, or a nested cycle;
   PHIs that match none of these keep vect_unknown_def_type.  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<gimple *, 64> worklist;
  gphi_iterator gsi;
  bool double_reduc;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_analyze_scalar_cycles ===\n");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      /* Skip virtual phi's.  The data dependences that are associated with
	 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
	continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
	{
	  STRIP_NOPS (access_fn);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "Access function of PHI: ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
	      dump_printf (MSG_NOTE, "\n");
	    }
	  /* Cache the base and evolution so later phases need not re-run
	     scalar evolution analysis.  */
	  STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
	    = initial_condition_in_loop_num (access_fn, loop->num);
	  STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
	    = evolution_part_in_loop_num (access_fn, loop->num);
	}

      /* Not a simple induction (or, for outer-loop vectorization, an
	 induction with a non-constant step): defer to the second pass
	 for reduction/cycle detection.  */
      if (!access_fn
	  || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
	  || (LOOP_VINFO_LOOP (loop_vinfo) != loop
	      && TREE_CODE (step) != INTEGER_CST))
	{
	  worklist.safe_push (phi);
	  continue;
	}

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
		  != NULL_TREE);
      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple *phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple *reduc_stmt;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      gcc_assert (!virtual_operand_p (def)
		  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
						&double_reduc, false);
      if (reduc_stmt)
	{
	  if (double_reduc)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "Detected double reduction.\n");

	      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
	      STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
						vect_double_reduction_def;
	    }
	  else
	    {
	      /* A cycle found while analyzing an inner loop during
		 outer-loop vectorization is a nested cycle, not a
		 reduction of the loop being vectorized.  */
	      if (loop != LOOP_VINFO_LOOP (loop_vinfo))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "Detected vectorizable nested cycle.\n");

		  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
		  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
							   vect_nested_cycle;
		}
	      else
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "Detected reduction.\n");

		  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
		  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
							   vect_reduction_def;
		  /* Store the reduction cycles for possible vectorization in
		     loop-aware SLP if it was not detected as reduction
		     chain.  */
		  if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
		    LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
		}
	    }
	}
      else
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Unknown def-use cycle pattern.\n");
    }
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also to its
   inner-loop, if exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *outer = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, outer);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (outer->inner != NULL)
    vect_analyze_scalar_cycles_1 (loop_vinfo, outer->inner);
}
/* Transfer group and reduction information from STMT to its pattern stmt.

   STMT is the first element of a reduction chain whose members were all
   replaced by pattern stmts; walk the GROUP_NEXT_ELEMENT chain and make
   the related pattern stmts form the equivalent group, then mark the new
   first element as a reduction definition.  */

static void
vect_fixup_reduc_chain (gimple *stmt)
{
  /* The pattern stmt that replaces STMT becomes the new chain head.  */
  gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
  gimple *stmtp;
  gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
	      && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
  GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
  do
    {
      /* Link each pattern stmt to the new head and to the pattern stmt
	 of the next original chain member (if any).  */
      stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      if (stmt)
	GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
	  = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
    }
  while (stmt);
  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
}
/* Fixup scalar cycles that now have their stmts detected as patterns.

   For each recorded reduction chain whose head was replaced by a pattern
   stmt, transfer the chain to the pattern stmts - but only when every
   member of the chain was pattern-replaced; a partially replaced chain
   is left alone.  */

static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
  gimple *first;
  unsigned i;

  FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
    if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
      {
	/* Scan for a chain member that is NOT part of a pattern.  */
	gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
	while (next)
	  {
	    if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
	      break;
	    next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	  }
	/* If not all stmt in the chain are patterns try to handle
	   the chain without patterns.  NEXT == NULL means the whole
	   chain was pattern-replaced, so move it over.  */
	if (! next)
	  {
	    vect_fixup_reduc_chain (first);
	    LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
	      = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
	  }
      }
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
   niter information holds in ASSUMPTIONS.

   Return the loop exit condition.  */

static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
		      tree *number_of_iterations, tree *number_of_iterationsm1)
{
  edge exit = single_exit (loop);
  struct tree_niter_desc niter_desc;
  tree niter_assumptions, niter, may_be_zero;
  gcond *cond = get_loop_exit_condition (loop);

  /* Pessimistic defaults, overwritten below on success.  */
  *assumptions = boolean_true_node;
  *number_of_iterationsm1 = chrec_dont_know;
  *number_of_iterations = chrec_dont_know;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== get_loop_niters ===\n");

  if (!exit)
    return cond;

  niter = chrec_dont_know;
  may_be_zero = NULL_TREE;
  niter_assumptions = boolean_true_node;
  if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
      || chrec_contains_undetermined (niter_desc.niter))
    return cond;

  niter_assumptions = niter_desc.assumptions;
  may_be_zero = niter_desc.may_be_zero;
  niter = niter_desc.niter;

  if (may_be_zero && integer_zerop (may_be_zero))
    may_be_zero = NULL_TREE;

  if (may_be_zero)
    {
      if (COMPARISON_CLASS_P (may_be_zero))
	{
	  /* Try to combine may_be_zero with assumptions, this can simplify
	     computation of niter expression.  */
	  if (niter_assumptions && !integer_nonzerop (niter_assumptions))
	    niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					     niter_assumptions,
					     fold_build1 (TRUTH_NOT_EXPR,
							  boolean_type_node,
							  may_be_zero));
	  else
	    niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
				 build_int_cst (TREE_TYPE (niter), 0),
				 rewrite_to_non_trapping_overflow (niter));

	  may_be_zero = NULL_TREE;
	}
      else if (integer_nonzerop (may_be_zero))
	{
	  /* The loop body provably never executes: 0 latch iterations,
	     1 header execution.  */
	  *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
	  *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
	  return cond;
	}
      else
	return cond;
    }

  *assumptions = niter_assumptions;
  *number_of_iterationsm1 = niter;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ???  For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niter && !chrec_contains_undetermined (niter))
    niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
			  build_int_cst (TREE_TYPE (niter), 1));
  *number_of_iterations = niter;

  return cond;
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.
   DATA is the loop being traversed; returns whether BB belongs to it.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *) data;
  return flow_bb_inside_loop_p (loop, bb);
}
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
   stmt_vec_info structs for all the stmts in LOOP_IN.  The BBS array is
   filled in reverse-postorder-like DFS order so later analyses can visit
   each block before its (non-latch) successors.  */

_loop_vec_info::_loop_vec_info (struct loop *loop_in)
  : vec_info (vec_info::loop, init_cost (loop_in)),
    loop (loop_in),
    bbs (XCNEWVEC (basic_block, loop->num_nodes)),
    num_itersm1 (NULL_TREE),
    num_iters (NULL_TREE),
    num_iters_unchanged (NULL_TREE),
    num_iters_assumptions (NULL_TREE),
    th (0),
    versioning_threshold (0),
    vectorization_factor (0),
    max_vectorization_factor (0),
    mask_skip_niters (NULL_TREE),
    mask_compare_type (NULL_TREE),
    unaligned_dr (NULL),
    peeling_for_alignment (0),
    ptr_mask (0),
    ivexpr_map (NULL),
    slp_unrolling_factor (1),
    single_scalar_iteration_cost (0),
    vectorizable (false),
    can_fully_mask_p (true),
    fully_masked_p (false),
    peeling_for_gaps (false),
    peeling_for_niter (false),
    operands_swapped (false),
    no_data_dependencies (false),
    has_mask_store (false),
    scalar_loop (NULL),
    orig_loop_info (NULL)
{
  /* Create/Update stmt_info for all stmts in the loop.  */
  basic_block *body = get_loop_body (loop);
  for (unsigned int i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = body[i];
      gimple_stmt_iterator si;

      /* PHIs and ordinary stmts alike get a fresh stmt_vec_info and a
	 zeroed uid.  */
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *phi = gsi_stmt (si);
	  gimple_set_uid (phi, 0);
	  set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, this));
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, 0);
	  set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, this));
	}
    }
  free (body);

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would the same
     as reversed postorder traversal, so we are safe.  */
  unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
					  bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);
}
/* Free all levels of MASKS: release each rgroup's mask vector, then
   the vector of rgroups itself.  */

void
release_vec_loop_masks (vec_loop_masks *masks)
{
  unsigned int n = masks->length ();
  for (unsigned int ix = 0; ix < n; ++ix)
    (*masks)[ix].masks.release ();
  masks->release ();
}
/* Free all memory used by the _loop_vec_info, as well as all the
   stmt_vec_info structs of all the stmts in the loop.  Also undoes
   operand swapping done during analysis (see operands_swapped) so the
   IL is left in canonical form.  */

_loop_vec_info::~_loop_vec_info ()
{
  int nbbs;
  gimple_stmt_iterator si;
  int j;

  nbbs = loop->num_nodes;
  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
	  gimple *stmt = gsi_stmt (si);

	  /* We may have broken canonical form by moving a constant
	     into RHS1 of a commutative op.  Fix such occurrences.  */
	  if (operands_swapped && is_gimple_assign (stmt))
	    {
	      enum tree_code code = gimple_assign_rhs_code (stmt);

	      if ((code == PLUS_EXPR
		   || code == POINTER_PLUS_EXPR
		   || code == MULT_EXPR)
		  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
		swap_ssa_operands (stmt,
				   gimple_assign_rhs1_ptr (stmt),
				   gimple_assign_rhs2_ptr (stmt));
	      else if (code == COND_EXPR
		       && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
		{
		  /* For COND_EXPR the swap also requires inverting the
		     comparison; skip if the inverse doesn't exist
		     (e.g. unordered FP compares without NaN info).  */
		  tree cond_expr = gimple_assign_rhs1 (stmt);
		  enum tree_code cond_code = TREE_CODE (cond_expr);

		  if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
		    {
		      bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
								  0));
		      cond_code = invert_tree_comparison (cond_code,
							  honor_nans);
		      if (cond_code != ERROR_MARK)
			{
			  TREE_SET_CODE (cond_expr, cond_code);
			  swap_ssa_operands (stmt,
					     gimple_assign_rhs2_ptr (stmt),
					     gimple_assign_rhs3_ptr (stmt));
			}
		    }
		}
	    }

	  /* Free stmt_vec_info.  */
	  free_stmt_vec_info (stmt);
	  gsi_next (&si);
        }
    }

  free (bbs);

  release_vec_loop_masks (&masks);
  delete ivexpr_map;

  /* Detach this loop_vec_info from the loop; the loop outlives us.  */
  loop->aux = NULL;
}
/* Return an invariant or register for EXPR and emit necessary
   computations in the LOOP_VINFO loop preheader.  Results are memoized
   in LOOP_VINFO's ivexpr_map so the same expression is only gimplified
   once.  */

tree
cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
{
  /* Registers and invariants can be used directly.  */
  if (is_gimple_min_invariant (expr) || is_gimple_reg (expr))
    return expr;

  if (loop_vinfo->ivexpr_map == NULL)
    loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
  tree &entry = loop_vinfo->ivexpr_map->get_or_insert (expr);
  if (entry == NULL_TREE)
    {
      gimple_seq seq = NULL;
      entry = force_gimple_operand (unshare_expr (expr),
				    &seq, true, NULL_TREE);
      if (seq != NULL)
	{
	  edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
	  gsi_insert_seq_on_edge_immediate (pe, seq);
	}
    }
  return entry;
}
/* Return true if we can use CMP_TYPE as the comparison type to produce
   all masks required to mask LOOP_VINFO, i.e. the target supports
   IFN_WHILE_ULT from CMP_TYPE to every rgroup's mask type.  */

static bool
can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
{
  vec_loop_masks &masks = LOOP_VINFO_MASKS (loop_vinfo);
  for (unsigned int ix = 0; ix < masks.length (); ++ix)
    {
      tree mask_type = masks[ix].mask_type;
      /* Unused rgroups have no mask type and constrain nothing.  */
      if (mask_type == NULL_TREE)
	continue;
      if (!direct_internal_fn_supported_p (IFN_WHILE_ULT,
					   cmp_type, mask_type,
					   OPTIMIZE_FOR_SPEED))
	return false;
    }
  return true;
}
/* Calculate the maximum number of scalars per iteration for every
   rgroup in LOOP_VINFO.  The result is at least 1.  */

static unsigned int
vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
{
  unsigned int best = 1;
  vec_loop_masks &masks = LOOP_VINFO_MASKS (loop_vinfo);
  for (unsigned int ix = 0; ix < masks.length (); ++ix)
    if (masks[ix].max_nscalars_per_iter > best)
      best = masks[ix].max_nscalars_per_iter;
  return best;
}
/* Each statement in LOOP_VINFO can be masked where necessary.  Check
   whether we can actually generate the masks required.  Return true if so,
   storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE.  */

static bool
vect_verify_full_masking (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned int min_ni_width;

  /* Use a normal loop if there are no statements that need masking.
     This only happens in rare degenerate cases: it means that the loop
     has no loads, no stores, and no live-out values.  */
  if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
    return false;

  /* Get the maximum number of iterations that is representable
     in the counter type.  */
  tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
  widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;

  /* Get a more refined estimate for the number of iterations.  */
  widest_int max_back_edges;
  if (max_loop_iterations (loop, &max_back_edges))
    max_ni = wi::smin (max_ni, max_back_edges + 1);

  /* Account for rgroup masks, in which each bit is replicated N times.  */
  max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);

  /* Work out how many bits we need to represent the limit.  */
  min_ni_width = wi::min_precision (max_ni, UNSIGNED);

  /* Find a scalar mode for which WHILE_ULT is supported.  */
  opt_scalar_int_mode cmp_mode_iter;
  tree cmp_type = NULL_TREE;
  FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
    {
      unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
      if (cmp_bits >= min_ni_width
	  && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
	{
	  tree this_type = build_nonstandard_integer_type (cmp_bits, true);
	  if (this_type
	      && can_produce_all_loop_masks_p (loop_vinfo, this_type))
	    {
	      /* Although we could stop as soon as we find a valid mode,
		 it's often better to continue until we hit Pmode, since the
		 operands to the WHILE are more likely to be reusable in
		 address calculations.  */
	      cmp_type = this_type;
	      if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
		break;
	    }
	}
    }

  if (!cmp_type)
    return false;

  LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
  return true;
}
/* Calculate the cost of one scalar iteration of the loop and store it
   in LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST.  Inner-loop statements
   are weighted by an estimated inner trip count.  */

static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor;
  int innerloop_iters, i;

  /* Gather costs for statements in the scalar loop.  */

  /* FORNOW.  */
  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      /* Statements in the inner loop execute innerloop_iters times per
	 outer iteration.  */
      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
	  gimple *stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
            continue;

          /* Skip stmts that are not vectorized inside the loop.  */
          if (stmt_info
              && !STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            continue;

	  vect_cost_for_stmt kind;
          if (STMT_VINFO_DATA_REF (stmt_info))
            {
              if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
               kind = scalar_load;
             else
               kind = scalar_store;
            }
          else
            kind = scalar_stmt;

	  record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
			    factor, kind, stmt_info, 0, vect_prologue);
        }
    }

  /* Now accumulate cost.  Feed the recorded per-stmt costs through the
     target cost model to get the final scalar body cost.  */
  void *target_cost_data = init_cost (loop);
  stmt_info_for_cost *si;
  int j;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
		    j, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (target_cost_data, si->count,
			    si->kind, stmt_info, si->misalign,
			    vect_body);
    }
  unsigned dummy, body_cost = 0;
  finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
  destroy_cost_data (target_cost_data);
  LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
}
/* Function vect_analyze_loop_form_1.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough
   - the number of iterations can be analyzed, i.e, a countable loop.  The
     niter could be analyzed under some assumptions.

   On success the loop exit condition is returned via LOOP_COND (and,
   when analyzing an outer loop, the inner condition via
   INNER_LOOP_COND), and the niter information via ASSUMPTIONS,
   NUMBER_OF_ITERATIONSM1 and NUMBER_OF_ITERATIONS.  */

bool
vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
			  tree *assumptions, tree *number_of_iterationsm1,
			  tree *number_of_iterations, gcond **inner_loop_cond)
{
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_analyze_loop_form ===\n");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
	 exactly 2 (the header and latch).  Vectorizable inner-most loops
	 look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
          return false;
        }

      if (empty_block_p (loop->header))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: empty loop.\n");
	  return false;
	}
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
	 contains a single inner loop, and the number of BBs is exactly 5.
	 Vectorizable outer-loops look like this:

			(pre-header)
			   |
			  header <---+
			   |         |
		          inner-loop |
			   |         |
			  tail ------+
			   |
		        (exit-bb)

	 The inner-loop has the properties expected of inner-most loops
	 as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple nested loops.\n");
	  return false;
	}

      if (loop->num_nodes != 5)
        {
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
	  return false;
        }

      /* The inner loop must be entered from the outer-loop header and
	 exit into the block feeding the outer latch.  */
      entryedge = loop_preheader_edge (innerloop);
      if (entryedge->src != loop->header
	  || !single_exit (innerloop)
	  || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: unsupported outerloop form.\n");
	  return false;
	}

      /* Analyze the inner-loop.  */
      tree inner_niterm1, inner_niter, inner_assumptions;
      if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
				      &inner_assumptions, &inner_niterm1,
				      &inner_niter, NULL)
	  /* Don't support analyzing niter under assumptions for inner
	     loop.  */
	  || !integer_onep (inner_assumptions))
	{
	  if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: Bad inner loop.\n");
	  return false;
	}

      if (!expr_invariant_in_loop_p (loop, inner_niter))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: inner-loop count not"
                             " invariant.\n");
	  return false;
	}

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
			 "Considering outer-loop vectorization.\n");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
        {
          if (!single_exit (loop))
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple exits.\n");
          else if (EDGE_COUNT (loop->header->preds) != 2)
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: too many incoming edges.\n");
        }
      return false;
    }

  /* We assume that the loop exit condition is at the end of the loop. i.e,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: latch block not empty.\n");
      return false;
    }

  /* Make sure the exit is not abnormal.  */
  edge e = single_exit (loop);
  if (e->flags & EDGE_ABNORMAL)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: abnormal loop exit edge.\n");
      return false;
    }

  *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
				     number_of_iterationsm1);
  if (!*loop_cond)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: complicated exit condition.\n");
      return false;
    }

  if (integer_zerop (*assumptions)
      || !*number_of_iterations
      || chrec_contains_undetermined (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations cannot be "
			 "computed.\n");
      return false;
    }

  if (integer_zerop (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations = 0.\n");
      return false;
    }

  return true;
}
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form.
   Returns NULL when the loop form is not vectorizable.  On success the
   loop_vec_info is stored in LOOP->aux and holds the niter information
   (possibly guarded by versioning assumptions).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  tree assumptions, number_of_iterations, number_of_iterationsm1;
  gcond *loop_cond, *inner_loop_cond = NULL;

  if (! vect_analyze_loop_form_1 (loop, &loop_cond,
				  &assumptions, &number_of_iterationsm1,
				  &number_of_iterations, &inner_loop_cond))
    return NULL;

  loop_vec_info loop_vinfo = new _loop_vec_info (loop);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
  if (!integer_onep (assumptions))
    {
      /* We consider to vectorize this loop by versioning it under
	 some assumptions.  In order to do this, we need to clear
	 existing information computed by scev and niter analyzer.  */
      scev_reset_htab ();
      free_numbers_of_iterations_estimates (loop);
      /* Also set flag for this loop so that following scev and niter
	 analysis are done under the assumptions.  */
      loop_constraint_set (loop, LOOP_C_FINITE);
      /* Also record the assumptions for versioning.  */
      LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
			   "Symbolic number of iterations is ");
	  dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  /* Mark the exit conditions so they are vectorized as loop control.  */
  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
  if (inner_loop_cond)
    STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond))
      = loop_exit_ctrl_vec_info_type;

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Scan the loop stmts and dependent on whether there are any (non-)SLP
   statements update the vectorization factor.

   If all relevant stmts are pure-SLP the vectorization factor becomes the
   SLP unrolling factor; otherwise it is the least common multiple of the
   current factor and the SLP unrolling factor.  */

static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  poly_uint64 vectorization_factor;
  int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_update_vf_for_slp ===\n");

  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (known_ne (vectorization_factor, 0U));

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say, that we
     perform pure SLP on loop - cross iteration parallelism is not
     exploited.  */
  bool only_slp_in_loop = true;
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
	  /* For pattern stmts, examine the stmt that replaces them.  */
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	      && STMT_VINFO_RELATED_STMT (stmt_info))
	    {
	      stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	      stmt_info = vinfo_for_stmt (stmt);
	    }
	  if ((STMT_VINFO_RELEVANT_P (stmt_info)
	       || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !PURE_SLP_STMT (stmt_info))
	    /* STMT needs both SLP and loop-based vectorization.  */
	    only_slp_in_loop = false;
	}
    }

  if (only_slp_in_loop)
    {
      /* Guard the dumps with dump_enabled_p for consistency with the
	 rest of this file (and to avoid needless work).  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Loop contains only SLP stmts\n");
      vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Loop contains SLP and non-SLP stmts\n");
      /* Both the vectorization factor and unroll factor have the form
	 current_vector_size * X for some rational X, so they must have
	 a common multiple.  */
      vectorization_factor
	= force_common_multiple (vectorization_factor,
				 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
    }

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "Updating vectorization factor to ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, ".\n");
    }
}
/* Return true if STMT_INFO describes a double reduction phi and if
   the other phi in the reduction is also relevant for vectorization.

   This rejects cases such as:

      outer1:
	x_1 = PHI <x_3(outer2), ...>;
	...

      inner:
	x_2 = ...;
	...

      outer2:
	x_3 = PHI <x_2(inner)>;

   if nothing in x_2 or elsewhere makes x_1 relevant.  */

static bool
vect_active_double_reduction_p (stmt_vec_info stmt_info)
{
  return (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def
	  && STMT_VINFO_RELEVANT_P
	       (vinfo_for_stmt (STMT_VINFO_REDUC_DEF (stmt_info))));
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.
   Returns false when any phi or stmt cannot be vectorized, or when the
   loop contains nothing worth vectorizing.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  bool ok;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_analyze_loop_operations ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
        {
          gphi *phi = si.phi ();
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }
	  /* Virtual (memory) phis are handled by the dependence
	     analysis, not here.  */
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;

          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), cause this case
                 requires to actually do something here.  */
              if (STMT_VINFO_LIVE_P (stmt_info)
		  && !vect_active_double_reduction_p (stmt_info))
                {
                  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Unsupported loop-closed phi in "
				     "outer-loop.\n");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
		  gimple *op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
		  if (gimple_nop_p (op_def_stmt)
		      || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
		      || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                           != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }

          gcc_assert (stmt_info);

          if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
               || STMT_VINFO_LIVE_P (stmt_info))
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: scalar dependence cycle.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
		  && ! PURE_SLP_STMT (stmt_info))
		ok = vectorizable_induction (phi, NULL, NULL, NULL);
	      else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
			|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
		       && ! PURE_SLP_STMT (stmt_info))
		ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL);
            }

	  /* SLP PHIs are tested by vect_slp_analyze_node_operations.  */
	  if (ok
	      && STMT_VINFO_LIVE_P (stmt_info)
	      && !PURE_SLP_STMT (stmt_info))
	    ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL);

          if (!ok)
            {
              if (dump_enabled_p ())
                {
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: relevant phi not "
				   "supported: ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
                }
	      return false;
            }
        }

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
        {
	  gimple *stmt = gsi_stmt (si);
	  if (!gimple_clobber_p (stmt)
	      && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL))
	    return false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
			 "All the computation can be taken out of the loop.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: redundant loop. no profit to "
			 "vectorize.\n");
      return false;
    }

  return true;
}
/* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
is worthwhile to vectorize. Return 1 if definitely yes, 0 if
definitely no, or -1 if it's worth retrying. */
static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  /* VF to base cost comparisons on; for variable-length vectors this is
     an estimate supplied by vect_vf_for_cost rather than an exact VF.  */
  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);

  /* Only fully-masked loops can have iteration counts less than the
     vectorization factor.  */
  if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      HOST_WIDE_INT max_niter;

      /* Prefer the exact iteration count when known, otherwise fall back
	 to an upper bound derived from loop analysis (-1 if unknown).  */
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
	max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
      else
	max_niter = max_stmt_executions_int (loop);

      if (max_niter != -1
	  && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: iteration count smaller than "
			     "vectorization factor.\n");
	  /* Definitely not worthwhile; no point retrying either.  */
	  return 0;
	}
    }

  int min_profitable_iters, min_profitable_estimate;
  vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
				      &min_profitable_estimate);

  /* A negative value means the vector version can never be profitable;
     return -1 so the caller may retry (e.g. with SLP disabled or a
     different vector size).  */
  if (min_profitable_iters < 0)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vector version will never be "
			 "profitable.\n");
      return -1;
    }

  /* User-specified floor on profitable iteration counts, scaled by VF.  */
  int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
			       * assumed_vf);

  /* Use the cost model only if it is more conservative than user specified
     threshold.  */
  unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
				    min_profitable_iters);

  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not vectorized: iteration count smaller than user "
			 "specified loop bound parameter or minimum profitable "
			 "iterations (whichever is more conservative).\n");
      return 0;
    }

  /* When the exact count is unknown, compare the threshold against the
     best available estimate (falling back to the likely maximum).  */
  HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
  if (estimated_niter == -1)
    estimated_niter = likely_max_stmt_executions_int (loop);
  if (estimated_niter != -1
      && ((unsigned HOST_WIDE_INT) estimated_niter
	  < MAX (th, (unsigned) min_profitable_estimate)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: estimated iteration count too "
			 "small.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not vectorized: estimated iteration count smaller "
			 "than specified loop bound parameter or minimum "
			 "profitable iterations (whichever is more "
			 "conservative).\n");
      /* Estimate-based rejection: retrying may still succeed.  */
      return -1;
    }

  return 1;
}
/* Function vect_analyze_loop_2.
Apply a set of analyses on LOOP, and create a loop_vec_info struct
for it. The different analyses will record information in the
loop_vec_info struct. */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
{
  bool ok;
  int res;
  unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
  poly_uint64 min_vf = 2;
  unsigned int n_stmts = 0;

  /* The first group of checks is independent of the vector size.
     FATAL tells the caller whether retrying with a different vector
     size could possibly help; it stays true until we pass the checks
     that do not depend on the vector size.  */
  fatal = true;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  */
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);

  loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
  if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: loop nest containing two "
			 "or more consecutive inner loops cannot be "
			 "vectorized\n");
      return false;
    }

  /* Walk every non-debug statement: count them (for SLP analysis below)
     and collect their data references.  */
  for (unsigned i = 0; i < loop->num_nodes; i++)
    for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
	 !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (is_gimple_debug (stmt))
	  continue;
	++n_stmts;
	if (!find_data_references_in_stmt (loop, stmt,
					   &LOOP_VINFO_DATAREFS (loop_vinfo)))
	  {
	    /* Calls to functions with SIMD clones may still be
	       vectorizable inside a loop marked safelen, provided the
	       call itself has no analyzable data references.  */
	    if (is_gimple_call (stmt) && loop->safelen)
	      {
		tree fndecl = gimple_call_fndecl (stmt), op;
		if (fndecl != NULL_TREE)
		  {
		    cgraph_node *node = cgraph_node::get (fndecl);
		    if (node != NULL && node->simd_clones != NULL)
		      {
			unsigned int j, n = gimple_call_num_args (stmt);
			for (j = 0; j < n; j++)
			  {
			    op = gimple_call_arg (stmt, j);
			    if (DECL_P (op)
				|| (REFERENCE_CLASS_P (op)
				    && get_base_address (op)))
			      break;
			  }
			op = gimple_call_lhs (stmt);
			/* Ignore #pragma omp declare simd functions
			   if they don't have data references in the
			   call stmt itself.  */
			if (j == n
			    && !(op
				 && (DECL_P (op)
				     || (REFERENCE_CLASS_P (op)
					 && get_base_address (op)))))
			  continue;
		      }
		  }
	      }
	    if (dump_enabled_p ())
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "not vectorized: loop contains function "
			       "calls or data references that cannot "
			       "be analyzed\n");
	    return false;
	  }
      }

  /* Analyze the data references and also adjust the minimal
     vectorization factor according to the loads and stores.  */
  ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data references.\n");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */
  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  vect_fixup_scalar_cycles_with_patterns (loop_vinfo);

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.). FORNOW: Only handle consecutive access pattern.  */
  ok = vect_analyze_data_ref_accesses (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data access.\n");
      return false;
    }

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */
  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unexpected pattern.\n");
      return false;
    }

  /* While the rest of the analysis below depends on it in some way.  */
  fatal = false;

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */
  ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
  if (!ok
      || (max_vf != MAX_VECTORIZATION_FACTOR
	  && maybe_lt (max_vf, min_vf)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data dependence.\n");
      return false;
    }
  LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "can't determine vectorization factor.\n");
      return false;
    }
  /* Re-check the dependence cap now that the actual VF is known.  */
  if (max_vf != MAX_VECTORIZATION_FACTOR
      && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data dependence.\n");
      return false;
    }

  /* Compute the scalar iteration cost.  */
  vect_compute_single_scalar_iteration_cost (loop_vinfo);

  /* Saved so we can restore it if the SLP attempt is rolled back below.  */
  poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned th;

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, n_stmts);
  if (!ok)
    return false;

  /* If there are any SLP instances mark them as pure_slp.  */
  bool slp = vect_make_slp_decision (loop_vinfo);
  if (slp)
    {
      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);

      /* Update the vectorization factor based on the SLP decision.  */
      vect_update_vf_for_slp (loop_vinfo);
    }

  bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);

  /* We don't expect to have to roll back to anything other than an empty
     set of rgroups.  */
  gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());

  /* This is the point where we can re-start analysis with SLP forced off.  */
start_over:

  /* Now the vectorization factor is final.  */
  poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (known_ne (vectorization_factor, 0U));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vectorization_factor = ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
		   LOOP_VINFO_INT_NITERS (loop_vinfo));
    }

  HOST_WIDE_INT max_niter
    = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */
  ok = vect_analyze_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data alignment.\n");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    return false;

  /* Do not invoke vect_enhance_data_refs_alignment for eplilogue
     vectorization.  */
  if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    {
    /* This pass will decide on using loop versioning and/or loop peeling in
       order to enhance the alignment of data references in the loop.  */
    ok = vect_enhance_data_refs_alignment (loop_vinfo);
    if (!ok)
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "bad data alignment.\n");
        return false;
      }
    }

  if (slp)
    {
      /* Analyze operations in the SLP instances.  Note this may
	 remove unsupported SLP instances which makes the above
	 SLP kind detection invalid.  */
      unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
      vect_slp_analyze_operations (loop_vinfo);
      if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
	goto again;
    }

  /* Scan all the remaining operations in the loop that are not subject
     to SLP and make sure they are vectorizable.  */
  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad operation or unsupported loop bound.\n");
      return false;
    }

  /* Decide whether to use a fully-masked loop for this vectorization
     factor.  */
  LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
    = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
       && vect_verify_full_masking (loop_vinfo));
  if (dump_enabled_p ())
    {
      if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
	dump_printf_loc (MSG_NOTE, vect_location,
			 "using a fully-masked loop.\n");
      else
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not using a fully-masked loop.\n");
    }

  /* If epilog loop is required because of data accesses with gaps,
     one additional iteration needs to be peeled.  Check if there is
     enough iterations for vectorization.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);

      if (known_lt (wi::to_widest (scalar_niters), vf))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "loop has no enough iterations to support"
			     " peeling for gaps.\n");
	  return false;
	}
    }

  /* Check the costings of the loop make vectorizing worthwhile.  */
  res = vect_analyze_loop_costing (loop_vinfo);
  /* -1 means "not profitable now but worth retrying" (see that
     function); 0 means definitely not worthwhile.  */
  if (res < 0)
    goto again;
  if (!res)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Loop costings not worthwhile.\n");
      return false;
    }

  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);

  unsigned HOST_WIDE_INT const_vf;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    /* The main loop handles all iterations.  */
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	   && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
    {
      /* Work out the (constant) number of iterations that need to be
	 peeled for reasons other than niters.  */
      unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
	peel_niter += 1;
      if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter,
		       LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
	LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
	   /* ??? When peeling for gaps but not alignment, we could
	      try to check whether the (variable) niters is known to be
	      VF * N + 1.  That's something of a niche case though.  */
	   || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
	   || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
	   || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
		< (unsigned) exact_log2 (const_vf))
	       /* In case of versioning, check if the maximum number of
		  iterations is greater than th.  If they are identical,
		  the epilogue is unnecessary.  */
	       && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
		   || ((unsigned HOST_WIDE_INT) max_niter
		       > (th / const_vf) * const_vf))))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;

  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
	  || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
					   single_exit (LOOP_VINFO_LOOP
							 (loop_vinfo))))
        {
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: can't create required "
			     "epilog loop\n");
          goto again;
        }
    }

  /* During peeling, we need to check if number of loop iterations is
     enough for both peeled prolog loop and vector loop.  This check
     can be merged along with threshold check of loop versioning, so
     increase threshold for this case if necessary.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      poly_uint64 niters_th = 0;

      if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
	{
	  /* Niters for peeled prolog loop.  */
	  if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
	    {
	      /* Negative means the peel amount is unknown at compile
		 time; assume the worst case of one full vector minus
		 one element.  */
	      struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
	      tree vectype
		= STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
	      niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
	    }
	  else
	    niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
	}

      /* Niters for at least one iteration of vectorized loop.  */
      if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
	niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      /* One additional iteration because of peeling for gap.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
	niters_th += 1;
      LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
    }

  gcc_assert (known_eq (vectorization_factor,
			LOOP_VINFO_VECT_FACTOR (loop_vinfo)));

  /* Ok to vectorize!  */
  return true;

again:
  /* Try again with SLP forced off but if we didn't do any SLP there is
     no point in re-trying.  */
  if (!slp)
    return false;

  /* If there are reduction chains re-trying will fail anyway.  */
  if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
    return false;

  /* Likewise if the grouped loads or stores in the SLP cannot be handled
     via interleaving or lane instructions.  */
  slp_instance instance;
  slp_tree node;
  unsigned i, j;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    {
      stmt_vec_info vinfo;
      vinfo = vinfo_for_stmt
	  (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
      if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
	continue;
      vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
      unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
      tree vectype = STMT_VINFO_VECTYPE (vinfo);
      if (! vect_store_lanes_supported (vectype, size, false)
	  && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
	  && ! vect_grouped_store_supported (vectype, size))
	return false;
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
	{
	  vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
	  vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
	  bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo);
	  size = STMT_VINFO_GROUP_SIZE (vinfo);
	  vectype = STMT_VINFO_VECTYPE (vinfo);
	  if (! vect_load_lanes_supported (vectype, size, false)
	      && ! vect_grouped_load_supported (vectype, single_element_p,
						size))
	    return false;
	}
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "re-trying with SLP disabled\n");

  /* Roll back state appropriately.  No SLP this time.  */
  slp = false;
  /* Restore vectorization factor as it were without SLP.  */
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
  /* Free the SLP instances.  */
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
    vect_free_slp_instance (instance);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Reset SLP type to loop_vect on all stmts.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    {
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator si = gsi_start_phis (bb);
	   !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
	  STMT_SLP_TYPE (stmt_info) = loop_vect;
	}
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
	  STMT_SLP_TYPE (stmt_info) = loop_vect;
	  /* Pattern statements and their definition sequences replace
	     the original stmt; reset their SLP type as well.  */
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
	    {
	      stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
	      STMT_SLP_TYPE (stmt_info) = loop_vect;
	      for (gimple_stmt_iterator pi
		     = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
		   !gsi_end_p (pi); gsi_next (&pi))
		{
		  gimple *pstmt = gsi_stmt (pi);
		  STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect;
		}
	    }
	}
    }
  /* Free optimized alias test DDRS.  */
  LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
  LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
  LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
  /* Reset target cost data.  */
  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
  LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
    = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
  /* Reset accumulated rgroup information.  */
  release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
  /* Reset assorted flags.  */
  LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
  LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
  LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;

  goto start_over;
}
/* Function vect_analyze_loop.
Apply a set of analyses on LOOP, and create a loop_vec_info struct
for it. The different analyses will record information in the
loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL epilogue must
be vectorized. */
loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
{
  loop_vec_info loop_vinfo;
  auto_vector_sizes vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
  unsigned int next_size = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "===== analyze_loop_nest =====\n");

  /* Do not vectorize an inner loop whose outer loop has already been
     vectorized (outer-loop vectorization handles the inner loop).  */
  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop already vectorized.\n");
      return NULL;
    }

  poly_uint64 autodetected_vector_size = 0;
  /* Retry loop: attempt analysis once per candidate vector size until
     one succeeds or a fatal failure / exhaustion ends the search.  */
  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "bad loop form.\n");
	  return NULL;
	}

      bool fatal = false;

      if (orig_loop_vinfo)
	LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;

      if (vect_analyze_loop_2 (loop_vinfo, fatal))
	{
	  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

	  return loop_vinfo;
	}

      delete loop_vinfo;

      /* Remember what the first (autodetected) size was, so we can skip
	 it if it reappears in the target's explicit size list.  */
      if (next_size == 0)
	autodetected_vector_size = current_vector_size;

      if (next_size < vector_sizes.length ()
	  && known_eq (vector_sizes[next_size], autodetected_vector_size))
	next_size += 1;

      /* Stop if the failure was size-independent (FATAL), there are no
	 more sizes to try, or the target has only one vector size.  */
      if (fatal
	  || next_size == vector_sizes.length ()
	  || known_eq (current_vector_size, 0U))
	return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = vector_sizes[next_size++];
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "***** Re-trying analysis with "
			   "vector size ");
	  dump_dec (MSG_NOTE, current_vector_size);
	  dump_printf (MSG_NOTE, "\n");
	}
    }
}
/* Return true if there is an in-order reduction function for CODE, storing
it in *REDUC_FN if so. */
static bool
fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
{
  /* Only ordered (in-order) addition is currently supported.  */
  if (code == PLUS_EXPR)
    {
      *reduc_fn = IFN_FOLD_LEFT_PLUS;
      return true;
    }
  return false;
}
/* Function reduction_fn_for_scalar_code
Input:
CODE - tree_code of a reduction operations.
Output:
REDUC_FN - the corresponding internal function to be used to reduce the
vector of partial results into a single scalar result, or IFN_LAST
if the operation is a supported reduction operation, but does not have
such an internal function.
Return FALSE if CODE currently cannot be vectorized as reduction. */
static bool
reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
{
switch (code)
{
case MAX_EXPR:
*reduc_fn = IFN_REDUC_MAX;
return true;
case MIN_EXPR:
*reduc_fn = IFN_REDUC_MIN;
return true;
case PLUS_EXPR:
*reduc_fn = IFN_REDUC_PLUS;
return true;
case BIT_AND_EXPR:
*reduc_fn = IFN_REDUC_AND;
return true;
case BIT_IOR_EXPR:
*reduc_fn = IFN_REDUC_IOR;
return true;
case BIT_XOR_EXPR:
*reduc_fn = IFN_REDUC_XOR;
return true;
case MULT_EXPR:
case MINUS_EXPR:
*reduc_fn = IFN_LAST;
return true;
default:
return false;
}
}
/* If there is a neutral value X such that SLP reduction NODE would not
be affected by the introduction of additional X elements, return that X,
otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
is true if the SLP statements perform a single reduction, false if each
statement performs an independent reduction. */
static tree
neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
			      bool reduc_chain)
{
  vec<gimple *> scalar_stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *first = scalar_stmts[0];
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (first));
  tree scalar_type = TREE_TYPE (vectype);
  struct loop *loop = gimple_bb (first)->loop_father;
  gcc_assert (loop);

  switch (code)
    {
    case MULT_EXPR:
      /* Multiplying by one is a no-op.  */
      return build_one_cst (scalar_type);

    case BIT_AND_EXPR:
      /* ANDing with all-ones is a no-op.  */
      return build_all_ones_cst (scalar_type);

    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      /* Adding, subtracting, ORing or XORing zero is a no-op.  */
      return build_zero_cst (scalar_type);

    case MAX_EXPR:
    case MIN_EXPR:
      /* For MIN/MAX the initial values are neutral.  A reduction chain
	 has only a single initial value, so that value is neutral for
	 all statements.  */
      if (reduc_chain)
	return PHI_ARG_DEF_FROM_EDGE (first, loop_preheader_edge (loop));
      return NULL_TREE;

    default:
      return NULL_TREE;
    }
}
/* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
STMT is printed with a message MSG. */
static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
  /* Emit MSG at the current vectorizer location, then dump STMT in
     short (TDF_SLIM) form on the same dump stream.  */
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}
/* Detect SLP reduction of the form:
#a1 = phi <a5, a0>
a2 = operation (a1)
a3 = operation (a2)
a4 = operation (a3)
a5 = operation (a4)
#a = phi <a5>
PHI is the reduction phi node (#a1 = phi <a5, a0> above)
FIRST_STMT is the first reduction stmt in the chain
(a2 = operation (a1)).
Return TRUE if a reduction chain was detected. */
static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
gimple *first_stmt)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
enum tree_code code;
gimple *loop_use_stmt = NULL;
stmt_vec_info use_stmt_info;
tree lhs;
imm_use_iterator imm_iter;
use_operand_p use_p;
int nloop_uses, size = 0, n_out_of_loop_uses;
bool found = false;
if (loop != vect_loop)
return false;
auto_vec<stmt_vec_info, 8> reduc_chain;
lhs = PHI_RESULT (phi);
code = gimple_assign_rhs_code (first_stmt);
while (1)
{
nloop_uses = 0;
n_out_of_loop_uses = 0;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
/* Check if we got back to the reduction phi. */
if (use_stmt == phi)
{
loop_use_stmt = use_stmt;
found = true;
break;
}
if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
{
loop_use_stmt = use_stmt;
nloop_uses++;
}
else
n_out_of_loop_uses++;
/* There are can be either a single use in the loop or two uses in
phi nodes. */
if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
return false;
}
if (found)
break;
/* We reached a statement with no loop uses. */
if (nloop_uses == 0)
return false;
/* This is a loop exit phi, and we haven't reached the reduction phi. */
if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
return false;
if (!is_gimple_assign (loop_use_stmt)
|| code != gimple_assign_rhs_code (loop_use_stmt)
|| !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
return false;
/* Insert USE_STMT into reduction chain. */
use_stmt_info = vinfo_for_stmt (loop_use_stmt);
reduc_chain.safe_push (use_stmt_info);
lhs = gimple_assign_lhs (loop_use_stmt);
size++;
}
if (!found || loop_use_stmt != phi || size < 2)
return false;
/* Swap the operands, if needed, to make the reduction operand be the second
operand. */
lhs = PHI_RESULT (phi);
for (unsigned i = 0; i < reduc_chain.length (); ++i)
{
gassign *next_stmt = as_a <gassign *> (reduc_chain[i]->stmt);
if (gimple_assign_rhs2 (next_stmt) == lhs)
{
tree op = gimple_assign_rhs1 (next_stmt);
gimple *def_stmt = NULL;
if (TREE_CODE (op) == SSA_NAME)
def_stmt = SSA_NAME_DEF_STMT (op);
/* Check that the other def is either defined in the loop
("vect_internal_def"), or it's an induction (defined by a
loop-header phi-node). */
if (def_stmt
&& gimple_bb (def_stmt)
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& (is_gimple_assign (def_stmt)
|| is_gimple_call (def_stmt)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_induction_def
|| (gimple_code (def_stmt) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
lhs = gimple_assign_lhs (next_stmt);
continue;
}
return false;
}
else
{
tree op = gimple_assign_rhs2 (next_stmt);
gimple *def_stmt = NULL;
if (TREE_CODE (op) == SSA_NAME)
def_stmt = SSA_NAME_DEF_STMT (op);
/* Check that the other def is either defined in the loop
("vect_internal_def"), or it's an induction (defined by a
loop-header phi-node). */
if (def_stmt
&& gimple_bb (def_stmt)
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& (is_gimple_assign (def_stmt)
|| is_gimple_call (def_stmt)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_induction_def
|| (gimple_code (def_stmt) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
}
swap_ssa_operands (next_stmt,
gimple_assign_rhs1_ptr (next_stmt),
gimple_assign_rhs2_ptr (next_stmt));
update_stmt (next_stmt);
if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
}
else
return false;
}
lhs = gimple_assign_lhs (next_stmt);
}
/* Build up the actual chain. */
for (unsigned i = 0; i < reduc_chain.length () - 1; ++i)
{
GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0]->stmt;
GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1]->stmt;
}
GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0]->stmt;
GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL;
/* Save the chain for further analysis in SLP detection. */
LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]->stmt);
GROUP_SIZE (reduc_chain[0]) = size;
return true;
}
/* Return true if we need an in-order reduction for operation CODE
on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
overflow must wrap. */
static bool
needs_fold_left_reduction_p (tree type, tree_code code,
			     bool need_wrapping_integral_overflow)
{
  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type))
    {
      /* min and max are insensitive to evaluation order.  */
      if (code == MIN_EXPR || code == MAX_EXPR)
	return false;
      /* Everything else may only be reassociated with -fassociative-math.  */
      return !flag_associative_math;
    }

  if (INTEGRAL_TYPE_P (type))
    {
      /* Reassociation is unsafe if the operation can trap on overflow.  */
      if (!operation_no_trapping_overflow (type, code))
	return true;
      /* Likewise if the caller needs wrapping semantics that the type
	 does not guarantee for an operation that can overflow.  */
      return (need_wrapping_integral_overflow
	      && !TYPE_OVERFLOW_WRAPS (type)
	      && operation_can_overflow (code));
    }

  /* Saturating fixed-point arithmetic is order-sensitive.  */
  return SAT_FIXED_POINT_TYPE_P (type);
}
/* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
reduction operation CODE has a handled computation expression. */
bool
check_reduction_path (location_t loc, loop_p loop, gphi *phi, tree loop_arg,
		      enum tree_code code)
{
  /* Backtracking stack: each entry records the operand-iterator state
     and the use operand we descended through at that level.  */
  auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
  auto_bitmap visited;
  tree lookfor = PHI_RESULT (phi);
  ssa_op_iter curri;
  use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
  /* Position the iterator on the latch argument of PHI.  */
  while (USE_FROM_PTR (curr) != loop_arg)
    curr = op_iter_next_use (&curri);
  /* Exhaust the iterator so backtracking never revisits other PHI args.  */
  curri.i = curri.numops;
  do
    {
      path.safe_push (std::make_pair (curri, curr));
      tree use = USE_FROM_PTR (curr);
      /* Found the cycle back to the PHI result: path is complete.  */
      if (use == lookfor)
	break;
      gimple *def = SSA_NAME_DEF_STMT (use);
      if (gimple_nop_p (def)
	  || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
	{
	  /* Dead end (no def or def outside the loop): backtrack.  */
pop:
	  do
	    {
	      std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
	      curri = x.first;
	      curr = x.second;
	      do
		curr = op_iter_next_use (&curri);
	      /* Skip already visited or non-SSA operands (from iterating
	         over PHI args).  */
	      while (curr != NULL_USE_OPERAND_P
		     && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
			 || ! bitmap_set_bit (visited,
					      SSA_NAME_VERSION
					        (USE_FROM_PTR (curr)))));
	    }
	  while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
	  /* Exhausted every alternative: stop the walk.  */
	  if (curr == NULL_USE_OPERAND_P)
	    break;
	}
      else
	{
	  /* Descend into DEF's operands, skipping visited/non-SSA ones.  */
	  if (gimple_code (def) == GIMPLE_PHI)
	    curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
	  else
	    curr = op_iter_init_use (&curri, def, SSA_OP_USE);
	  while (curr != NULL_USE_OPERAND_P
		 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
		     || ! bitmap_set_bit (visited,
					  SSA_NAME_VERSION
					    (USE_FROM_PTR (curr)))))
	    curr = op_iter_next_use (&curri);
	  if (curr == NULL_USE_OPERAND_P)
	    goto pop;
	}
    }
  while (1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
      unsigned i;
      std::pair<ssa_op_iter, use_operand_p> *x;
      FOR_EACH_VEC_ELT (path, i, x)
	{
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
	  dump_printf (MSG_NOTE, " ");
	}
      dump_printf (MSG_NOTE, "\n");
    }

  /* Check whether the reduction path detected is valid.  */
  bool fail = path.length () == 0;
  bool neg = false;
  for (unsigned i = 1; i < path.length (); ++i)
    {
      gimple *use_stmt = USE_STMT (path[i].second);
      tree op = USE_FROM_PTR (path[i].second);
      /* Every intermediate value must feed exactly one assignment.  */
      if (! has_single_use (op)
	  || ! is_gimple_assign (use_stmt))
	{
	  fail = true;
	  break;
	}
      if (gimple_assign_rhs_code (use_stmt) != code)
	{
	  /* A MINUS in a PLUS reduction is tolerated only if the net
	     negation over the whole path cancels out.  */
	  if (code == PLUS_EXPR
	      && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    {
	      /* Track whether we negate the reduction value each iteration.  */
	      if (gimple_assign_rhs2 (use_stmt) == op)
		neg = ! neg;
	    }
	  else
	    {
	      fail = true;
	      break;
	    }
	}
    }
  return ! fail && ! neg;
}
/* Function vect_is_simple_reduction
(1) Detect a cross-iteration def-use cycle that represents a simple
reduction computation. We look for the following pattern:
loop_header:
a1 = phi < a0, a2 >
a3 = ...
a2 = operation (a3, a1)
or
a3 = ...
loop_header:
a1 = phi < a0, a2 >
a2 = operation (a3, a1)
such that:
1. operation is commutative and associative and it is safe to
change the order of the computation
2. no uses for a2 in the loop (a2 is used out of the loop)
3. no uses of a1 in the loop besides the reduction operation
4. no uses of a1 outside the loop.
Conditions 1,4 are tested here.
Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
(2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
nested cycles.
(3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
reductions:
a1 = phi < a0, a2 >
inner loop (def of a3)
a2 = phi < a3 >
(4) Detect condition expressions, ie:
for (int i = 0; i < N; i++)
if (a[i] < val)
ret_val = a[i];
*/
static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
			  bool *double_reduc,
			  bool need_wrapping_integral_overflow,
			  enum vect_reduction_type *v_reduc_type)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;
  /* Assume the most general outcome until proven otherwise.  */
  *double_reduc = false;
  *v_reduc_type = TREE_CODE_REDUCTION;
  tree phi_name = PHI_RESULT (phi);
  /* ??? If there are no uses of the PHI result the inner loop reduction
     won't be detected as possibly double-reduction by vectorizable_reduction
     because that tries to walk the PHI arg from the preheader edge which
     can be constant.  See PR60382.  */
  if (has_zero_uses (phi_name))
    return NULL;
  /* The PHI result must have exactly one use inside the loop (the
     reduction statement itself); any use outside the loop disqualifies
     the candidate.  Remember the single in-loop use for later.  */
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "intermediate value used outside loop.\n");
	  return NULL;
	}
      nloop_uses++;
      if (nloop_uses > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduction value used in loop.\n");
	  return NULL;
	}
      phi_use_stmt = use_stmt;
    }
  /* The value flowing back into the PHI over the latch must be an SSA
     name defined by the candidate reduction statement.  */
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "reduction: not ssa_name: ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return NULL;
    }
  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else if (gimple_code (def_stmt) == GIMPLE_PHI)
    {
      /* A PHI definition is the double-reduction / nested-cycle case,
	 handled below under PHI_DEF.  */
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }
  else
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "reduction: unhandled reduction operation: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0);
	}
      return NULL;
    }
  if (! flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
    return NULL;
  /* Like the PHI result, the reduction result may have at most one use
     inside the loop; all loop-closed PHIs using it outside are collected
     so the nested-reduction check below can inspect them.  */
  nloop_uses = 0;
  auto_vec<gphi *, 3> lcphis;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	nloop_uses++;
      else
	/* We can have more than one loop-closed PHI.  */
	lcphis.safe_push (as_a <gphi *> (use_stmt));
      if (nloop_uses > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduction used in loop.\n");
	  return NULL;
	}
    }
  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);
      if (gimple_phi_num_args (def_stmt) != 1
	  || TREE_CODE (op1) != SSA_NAME)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported phi node definition.\n");
	  return NULL;
	}
      def1 = SSA_NAME_DEF_STMT (op1);
      /* Recognize the double-reduction shape: the inner-loop assignment
	 feeds a PHI whose only use is itself inside the inner loop.  */
      if (gimple_bb (def1)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
	  && loop->inner
	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
	  && is_gimple_assign (def1)
	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
	{
	  if (dump_enabled_p ())
	    report_vect_op (MSG_NOTE, def_stmt,
			    "detected double reduction: ");
	  *double_reduc = true;
	  return def_stmt;
	}
      return NULL;
    }
  /* If we are vectorizing an inner reduction we are executing that
     in the original order only in case we are not dealing with a
     double reduction.  */
  bool check_reduction = true;
  if (flow_loop_nested_p (vect_loop, loop))
    {
      /* Re-enable the reordering check only if some loop-closed PHI of
	 the inner reduction is used outside the loop being vectorized.  */
      gphi *lcphi;
      unsigned i;
      check_reduction = false;
      FOR_EACH_VEC_ELT (lcphis, i, lcphi)
	FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
	  {
	    gimple *use_stmt = USE_STMT (use_p);
	    if (is_gimple_debug (use_stmt))
	      continue;
	    if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
	      check_reduction = true;
	  }
    }
  bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
  code = orig_code = gimple_assign_rhs_code (def_stmt);
  /* We can handle "res -= x[i]", which is non-associative by
     simply rewriting this into "res += -x[i]".  Avoid changing
     gimple instruction for the first simple tests and only do this
     if we're allowed to change code at all.  */
  if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
    code = PLUS_EXPR;
  if (code == COND_EXPR)
    {
      if (! nested_in_vect_loop)
	*v_reduc_type = COND_REDUCTION;
      /* Split out an embedded comparison so its operands can also be
	 checked against the PHI result and for type compatibility.  */
      op3 = gimple_assign_rhs1 (def_stmt);
      if (COMPARISON_CLASS_P (op3))
	{
	  op4 = TREE_OPERAND (op3, 1);
	  op3 = TREE_OPERAND (op3, 0);
	}
      if (op3 == phi_name || op4 == phi_name)
	{
	  if (dump_enabled_p ())
	    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			    "reduction: condition depends on previous"
			    " iteration: ");
	  return NULL;
	}
      op1 = gimple_assign_rhs2 (def_stmt);
      op2 = gimple_assign_rhs3 (def_stmt);
    }
  else if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			"reduction: not commutative/associative: ");
      return NULL;
    }
  else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);
    }
  else
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			"reduction: not handled operation: ");
      return NULL;
    }
  if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			"reduction: both uses not ssa_names: ");
      return NULL;
    }
  /* All SSA operands must share the type of the reduction result;
     mixed-type reductions are not handled here.  */
  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type,TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
	  && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "reduction: multiple types: operation type: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
	  dump_printf (MSG_NOTE, ", operands types: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM,
			     TREE_TYPE (op1));
	  dump_printf (MSG_NOTE, ",");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM,
			     TREE_TYPE (op2));
	  if (op3)
	    {
	      dump_printf (MSG_NOTE, ",");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
				 TREE_TYPE (op3));
	    }
	  if (op4)
	    {
	      dump_printf (MSG_NOTE, ",");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
				 TREE_TYPE (op4));
	    }
	  dump_printf (MSG_NOTE, "\n");
	}
      return NULL;
    }
  /* Check whether it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */
  if (check_reduction
      && *v_reduc_type == TREE_CODE_REDUCTION
      && needs_fold_left_reduction_p (type, code,
				      need_wrapping_integral_overflow))
    *v_reduc_type = FOLD_LEFT_REDUCTION;
  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);
  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);
  if (code != COND_EXPR
      && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }
  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */
  if (def2 && def2 == phi
      && (code == COND_EXPR
	  || !def1 || gimple_nop_p (def1)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
	  || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
	      && (is_gimple_assign (def1)
		  || is_gimple_call (def1)
		  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
		     == vect_induction_def
		  || (gimple_code (def1) == GIMPLE_PHI
		      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
			 == vect_internal_def
		      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      /* The reduction variable is already the second operand - the shape
	 the rest of the vectorizer expects.  */
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt;
    }
  if (def1 && def1 == phi
      && (code == COND_EXPR
	  || !def2 || gimple_nop_p (def2)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
	  || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
	      && (is_gimple_assign (def2)
		  || is_gimple_call (def2)
		  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
		     == vect_induction_def
		  || (gimple_code (def2) == GIMPLE_PHI
		      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
			 == vect_internal_def
		      && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
	{
	  /* Check if we can swap operands (just for simplicity - so that
	     the rest of the code can assume that the reduction variable
	     is always the last (second) argument).  */
	  if (code == COND_EXPR)
	    {
	      /* Swap cond_expr by inverting the condition.  */
	      tree cond_expr = gimple_assign_rhs1 (def_stmt);
	      enum tree_code invert_code = ERROR_MARK;
	      enum tree_code cond_code = TREE_CODE (cond_expr);
	      if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
		{
		  bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
		  invert_code = invert_tree_comparison (cond_code, honor_nans);
		}
	      if (invert_code != ERROR_MARK)
		{
		  /* NOTE: this mutates the IR in place, even though we are
		     only analyzing - callers rely on this normalization.  */
		  TREE_SET_CODE (cond_expr, invert_code);
		  swap_ssa_operands (def_stmt,
				     gimple_assign_rhs2_ptr (def_stmt),
				     gimple_assign_rhs3_ptr (def_stmt));
		}
	      else
		{
		  if (dump_enabled_p ())
		    report_vect_op (MSG_NOTE, def_stmt,
				    "detected reduction: cannot swap operands "
				    "for cond_expr");
		  return NULL;
		}
	    }
	  else
	    swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
			       gimple_assign_rhs2_ptr (def_stmt));
	  if (dump_enabled_p ())
	    report_vect_op (MSG_NOTE, def_stmt,
			    "detected reduction: need to swap operands: ");
	  if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
	    LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
	}
      else
	{
	  if (dump_enabled_p ())
	    report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
	}
      return def_stmt;
    }
  /* Try to find SLP reduction chain.  */
  if (! nested_in_vect_loop
      && code != COND_EXPR
      && orig_code != MINUS_EXPR
      && vect_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt,
			"reduction: detected reduction chain: ");
      return def_stmt;
    }
  /* Dissolve group eventually half-built by vect_is_slp_reduction.  */
  gimple *first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
  while (first)
    {
      gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
      GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
      first = next;
    }
  /* Look for the expression computing loop_arg from loop PHI result.  */
  if (check_reduction_path (vect_location, loop, as_a <gphi *> (phi), loop_arg,
			    code))
    return def_stmt;
  if (dump_enabled_p ())
    {
      report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
		      "reduction: unknown pattern: ");
    }
  return NULL;
}
/* Wrapper around vect_is_simple_reduction, which will modify code
in-place if it enables detection of more reductions. Arguments
as there. */
/* Wrapper around vect_is_simple_reduction that, on success, records the
   detected reduction kind and the PHI/def cross-links in the stmt_vec_infos
   of both statements.  Arguments as for vect_is_simple_reduction.  */
gimple *
vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
			     bool *double_reduc,
			     bool need_wrapping_integral_overflow)
{
  enum vect_reduction_type reduc_type;
  gimple *reduc_stmt
    = vect_is_simple_reduction (loop_info, phi, double_reduc,
				need_wrapping_integral_overflow,
				&reduc_type);
  if (!reduc_stmt)
    return NULL;

  /* Link the PHI to its reduction statement and vice versa, and stamp
     both with the reduction type so later analysis can pick it up.  */
  stmt_vec_info phi_info = vinfo_for_stmt (phi);
  STMT_VINFO_REDUC_TYPE (phi_info) = reduc_type;
  STMT_VINFO_REDUC_DEF (phi_info) = reduc_stmt;

  stmt_vec_info def_info = vinfo_for_stmt (reduc_stmt);
  STMT_VINFO_REDUC_TYPE (def_info) = reduc_type;
  STMT_VINFO_REDUC_DEF (def_info) = phi;

  return reduc_stmt;
}
/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
/* Compute the scalar cost of executing PEEL_ITERS_PROLOGUE prologue
   iterations plus the derived number of epilogue iterations (stored in
   *PEEL_ITERS_EPILOGUE).  Per-statement scalar costs come from
   SCALAR_COST_VEC; the scaled costs are appended to PROLOGUE_COST_VEC
   and EPILOGUE_COST_VEC.  Returns the accumulated cost.  */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
			     int *peel_iters_epilogue,
			     stmt_vector_for_cost *scalar_cost_vec,
			     stmt_vector_for_cost *prologue_cost_vec,
			     stmt_vector_for_cost *epilogue_cost_vec)
{
  int cost = 0;
  int assumed_vf = vect_vf_for_cost (loop_vinfo);

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      /* Never peel more prologue iterations than the loop runs.  */
      if (niters < peel_iters_prologue)
	peel_iters_prologue = niters;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
	 peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
	*peel_iters_epilogue = assumed_vf;
    }
  else
    {
      *peel_iters_epilogue = assumed_vf / 2;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "cost model: epilogue peel iters set to vf/2 "
			 "because loop iterations are unknown .\n");
      /* If peeled iterations are known but number of scalar loop
	 iterations are unknown, count a taken branch per peeled loop.  */
      cost = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
			       NULL, 0, vect_prologue);
      cost = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
			       NULL, 0, vect_epilogue);
    }

  stmt_info_for_cost *entry;
  int ix;
  if (peel_iters_prologue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, ix, entry)
      {
	stmt_vec_info stmt_info
	  = entry->stmt ? vinfo_for_stmt (entry->stmt) : NULL;
	cost += record_stmt_cost (prologue_cost_vec,
				  entry->count * peel_iters_prologue,
				  entry->kind, stmt_info, entry->misalign,
				  vect_prologue);
      }
  if (*peel_iters_epilogue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, ix, entry)
      {
	stmt_vec_info stmt_info
	  = entry->stmt ? vinfo_for_stmt (entry->stmt) : NULL;
	cost += record_stmt_cost (epilogue_cost_vec,
				  entry->count * *peel_iters_epilogue,
				  entry->kind, stmt_info, entry->misalign,
				  vect_epilogue);
      }
  return cost;
}
/* Function vect_estimate_min_profitable_iters
Return the number of iterations required for the vector version of the
loop to be profitable relative to the cost of the scalar version of the
loop.
*RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
of iterations for vectorization. -1 value means loop vectorization
is not profitable. This returned value may be used for dynamic
profitability check.
*RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
for static check against estimated number of iterations. */
static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
				    int *ret_min_profitable_niters,
				    int *ret_min_profitable_estimate)
{
  int min_profitable_iters;
  int min_profitable_estimate;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  unsigned vec_inside_cost = 0;
  int vec_outside_cost = 0;
  unsigned vec_prologue_cost = 0;
  unsigned vec_epilogue_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int assumed_vf = vect_vf_for_cost (loop_vinfo);
  int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  /* Cost model disabled.  */
  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    {
      /* Threshold 0 means "always vectorize".  */
      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
      *ret_min_profitable_niters = 0;
      *ret_min_profitable_estimate = 0;
      return;
    }
  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
			    vect_prologue);
      dump_printf (MSG_NOTE,
		   "cost model: Adding cost of checks for loop "
		   "versioning to treat misalignment.\n");
    }
  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
			    vect_prologue);
      len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
      if (len)
	/* Count LEN - 1 ANDs and LEN comparisons.  */
	(void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
			      NULL, 0, vect_prologue);
      len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
      if (len)
	{
	  /* Count LEN - 1 ANDs and LEN comparisons.  */
	  unsigned int nstmts = len * 2 - 1;
	  /* +1 for each bias that needs adding.  */
	  for (unsigned int i = 0; i < len; ++i)
	    if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
	      nstmts += 1;
	  (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
				NULL, 0, vect_prologue);
	}
      dump_printf (MSG_NOTE,
		   "cost model: Adding cost of checks for loop "
		   "versioning aliasing.\n");
    }
  /* Requires loop versioning with niter checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
			    vect_prologue);
      dump_printf (MSG_NOTE,
		   "cost model: Adding cost of checks for loop "
		   "versioning niters.\n");
    }
  /* Any versioning at all implies one extra branch on entry.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
			  vect_prologue);
  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.
     TODO: Add outer loop support.
     TODO: Consider assigning different costs to different scalar
     statements.  */
  scalar_single_iter_cost
    = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.  (For fully-masked loops there will be no peeling.)
     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).
     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      peel_iters_prologue = 0;
      peel_iters_epilogue = 0;
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
	{
	  /* We need to peel exactly one iteration.  */
	  peel_iters_epilogue += 1;
	  stmt_info_for_cost *si;
	  int j;
	  FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
			    j, si)
	    {
	      struct _stmt_vec_info *stmt_info
		= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
	      (void) add_stmt_cost (target_cost_data, si->count,
				    si->kind, stmt_info, si->misalign,
				    vect_epilogue);
	    }
	}
    }
  else if (npeel < 0)
    {
      /* Unknown peeling-for-alignment amount: assume vf/2 both sides.  */
      peel_iters_prologue = assumed_vf / 2;
      dump_printf (MSG_NOTE, "cost model: "
		   "prologue peel iters set to vf/2.\n");
      /* If peeling for alignment is unknown, loop bound of main loop becomes
	 unknown.  */
      peel_iters_epilogue = assumed_vf / 2;
      dump_printf (MSG_NOTE, "cost model: "
		   "epilogue peel iters set to vf/2 because "
		   "peeling for alignment is unknown.\n");
      /* If peeled iterations are unknown, count a taken branch and a not taken
	 branch per peeled loop.  Even if scalar loop iterations are known,
	 vector iterations are not known since peeled prologue iterations are
	 not known.  Hence guards remain the same.  */
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
			    NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
			    NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
			    NULL, 0, vect_epilogue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
			    NULL, 0, vect_epilogue);
      stmt_info_for_cost *si;
      int j;
      FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
	{
	  struct _stmt_vec_info *stmt_info
	    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
	  (void) add_stmt_cost (target_cost_data,
				si->count * peel_iters_prologue,
				si->kind, stmt_info, si->misalign,
				vect_prologue);
	  (void) add_stmt_cost (target_cost_data,
				si->count * peel_iters_epilogue,
				si->kind, stmt_info, si->misalign,
				vect_epilogue);
	}
    }
  else
    {
      /* Known prologue peel count: delegate to the shared helper and then
	 transfer its per-statement costs into the target cost data.  */
      stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
      stmt_info_for_cost *si;
      int j;
      void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
      prologue_cost_vec.create (2);
      epilogue_cost_vec.create (2);
      peel_iters_prologue = npeel;
      (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
					  &peel_iters_epilogue,
					  &LOOP_VINFO_SCALAR_ITERATION_COST
					  (loop_vinfo),
					  &prologue_cost_vec,
					  &epilogue_cost_vec);
      FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
	{
	  struct _stmt_vec_info *stmt_info
	    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
	  (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
				si->misalign, vect_prologue);
	}
      FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
	{
	  struct _stmt_vec_info *stmt_info
	    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
	  (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
				si->misalign, vect_epilogue);
	}
      prologue_cost_vec.release ();
      epilogue_cost_vec.release ();
    }
  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:
     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.
       if (cost > th & versioning_check)
	 jmp to vector code
     Hence run-time scalar is incremented by not-taken branch cost.
     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.
       if (cost <= th)
	 prologue = scalar_iters
       if (prologue == 0)
	 jmp to vector code
       else
	 execute prologue
       if (prologue == num_iters)
	 go to exit
     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.
     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.
       if (prologue == 0)
	 jmp to vector code
       else
	 execute prologue
       if (prologue == num_iters)
	 go to exit
       vector code:
	 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
	   jmp to epilogue
     Hence the run-time scalar cost should be incremented by 2 taken
     branches.
     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */
  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     do not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
	scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
      else
	{
	  /* Cost model check occurs at prologue generation.  */
	  if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
	    scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
	      + vect_get_stmt_cost (cond_branch_not_taken);
	  /* Cost model check occurs at epilogue generation.  */
	  else
	    scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
	}
    }
  /* Complete the target-specific cost calculations.  */
  finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
	       &vec_inside_cost, &vec_epilogue_cost);
  vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
		   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
		   vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
		   vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
		   scalar_single_iter_cost);
      dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
		   scalar_outside_cost);
      dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
		   vec_outside_cost);
      dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
		   peel_iters_prologue);
      dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
		   peel_iters_epilogue);
    }
  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
     SOC = scalar outside cost for run time cost model check.  */
  if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
    {
      min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
			      * assumed_vf
			      - vec_inside_cost * peel_iters_prologue
			      - vec_inside_cost * peel_iters_epilogue);
      if (min_profitable_iters <= 0)
	min_profitable_iters = 0;
      else
	{
	  min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
				   - vec_inside_cost);
	  /* Integer division truncates; bump by one if the truncated
	     value does not yet satisfy the profitability inequality.  */
	  if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
	      <= (((int) vec_inside_cost * min_profitable_iters)
		  + (((int) vec_outside_cost - scalar_outside_cost)
		     * assumed_vf)))
	    min_profitable_iters++;
	}
    }
  /* vector version will never be profitable.  */
  else
    {
      if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
	warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
		    "did not happen for a simd loop");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cost model: the vector iteration cost = %d "
			 "divided by the scalar iteration cost = %d "
			 "is greater or equal to the vectorization factor = %d"
			 ".\n",
			 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
      *ret_min_profitable_niters = -1;
      *ret_min_profitable_estimate = -1;
      return;
    }
  dump_printf (MSG_NOTE,
	       "  Calculated minimum iters for profitability: %d\n",
	       min_profitable_iters);
  if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
      && min_profitable_iters < (assumed_vf + peel_iters_prologue))
    /* We want the vectorized loop to execute at least once.  */
    min_profitable_iters = assumed_vf + peel_iters_prologue;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "  Runtime profitability threshold = %d\n",
		     min_profitable_iters);
  *ret_min_profitable_niters = min_profitable_iters;
  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.
     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition must hold true:
     SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */
  if (vec_outside_cost <= 0)
    min_profitable_estimate = 0;
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
				 * assumed_vf
				 - vec_inside_cost * peel_iters_prologue
				 - vec_inside_cost * peel_iters_epilogue)
				 / ((scalar_single_iter_cost * assumed_vf)
				   - vec_inside_cost);
    }
  /* The static estimate is never below the runtime threshold.  */
  min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "  Static estimate profitability threshold = %d\n",
		     min_profitable_estimate);
  *ret_min_profitable_estimate = min_profitable_estimate;
}
/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
vector elements (not bits) for a vector with NELT elements. */
/* Fill SEL with a vec_perm mask equivalent to shifting a vector of NELT
   elements right by OFFSET elements (element counts, not bits).  */
static void
calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
			      vec_perm_builder *sel)
{
  /* A single stepped pattern of three seed elements is enough; any
     wrap-around is handled by vec_perm_indices.  */
  sel->new_vector (nelt, 1, 3);
  unsigned int step = 0;
  while (step < 3)
    {
      sel->quick_push (offset + step);
      step++;
    }
}
/* Checks whether the target supports whole-vector shifts for vectors of mode
MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
it supports vec_perm_const with masks for all necessary shift amounts. */
/* Return true if the target can shift whole vectors of mode MODE: either
   directly through vec_shr_optab, or by synthesizing every needed shift
   amount (nelt/2, nelt/4, ..., 1) as a constant permute.  */
static bool
have_whole_vector_shift (machine_mode mode)
{
  if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Variable-length vectors should be handled via the optab.  */
  unsigned int nelt;
  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    return false;

  vec_perm_builder sel;
  vec_perm_indices indices;
  unsigned int amount = nelt / 2;
  while (amount >= 1)
    {
      calc_vec_perm_mask_for_shift (amount, nelt, &sel);
      indices.new_vector (sel, 2, nelt);
      if (!can_vec_perm_const_p (mode, indices, false))
	return false;
      amount /= 2;
    }
  return true;
}
/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
functions. Design better to avoid maintenance issues. */
/* Function vect_model_reduction_cost.
Models cost for a reduction operation, including the vector ops
generated within the strip-mine loop, the initial definition before
the loop, and the epilogue code that must be generated. */
static void
vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
			   int ncopies)
{
  int prologue_cost = 0, epilogue_cost = 0, inside_cost;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple *orig_stmt;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  void *target_cost_data;
  /* Costs accumulate either into the loop's or the basic block's
     target cost data, depending on the vectorization context.  */
  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
    }
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));
  /* Condition reductions generate two reductions in the loop.  */
  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
  if (reduction_type == COND_REDUCTION)
    ncopies *= 2;
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  mode = TYPE_MODE (vectype);
  /* For pattern statements cost the original statement they replaced.  */
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);
  code = gimple_assign_rhs_code (orig_stmt);
  if (reduction_type == EXTRACT_LAST_REDUCTION
      || reduction_type == FOLD_LEFT_REDUCTION)
    {
      /* No extra instructions needed in the prologue.  */
      prologue_cost = 0;
      if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
	/* Count one reduction-like operation per vector.  */
	inside_cost = add_stmt_cost (target_cost_data, ncopies, vec_to_scalar,
				     stmt_info, 0, vect_body);
      else
	{
	  /* Use NELEMENTS extracts and NELEMENTS scalar ops.  */
	  unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
	  inside_cost = add_stmt_cost (target_cost_data, nelements,
				       vec_to_scalar, stmt_info, 0,
				       vect_body);
	  inside_cost += add_stmt_cost (target_cost_data, nelements,
					scalar_stmt, stmt_info, 0,
					vect_body);
	}
    }
  else
    {
      /* Add in cost for initial definition.
	 For cond reduction we have four vectors: initial index, step,
	 initial result of the data reduction, initial value of the index
	 reduction.  */
      int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
      prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
				      scalar_to_vec, stmt_info, 0,
				      vect_prologue);
      /* Cost of reduction op inside loop.  */
      inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
				   stmt_info, 0, vect_body);
    }
  /* Determine cost of epilogue code.
     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */
  /* Nested (inner-loop) reductions need no epilogue at all.  */
  if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_fn != IFN_LAST)
	{
	  if (reduction_type == COND_REDUCTION)
	    {
	      /* An EQ stmt and an COND_EXPR stmt.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 2,
					      vector_stmt, stmt_info, 0,
					      vect_epilogue);
	      /* Reduction of the max index and a reduction of the found
		 values.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 2,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	      /* A broadcast of the max value.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      scalar_to_vec, stmt_info, 0,
					      vect_epilogue);
	    }
	  else
	    {
	      /* Direct reduction: one reduction op plus one extract.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
					      stmt_info, 0, vect_epilogue);
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	    }
	}
      else if (reduction_type == COND_REDUCTION)
	{
	  /* No direct reduction support: extract all elements and reduce
	     them with scalar comparisons.  */
	  unsigned estimated_nunits = vect_nunits_for_cost (vectype);
	  /* Extraction of scalar elements.  */
	  epilogue_cost += add_stmt_cost (target_cost_data,
					  2 * estimated_nunits,
					  vec_to_scalar, stmt_info, 0,
					  vect_epilogue);
	  /* Scalar max reductions via COND_EXPR / MAX_EXPR.  */
	  epilogue_cost += add_stmt_cost (target_cost_data,
					  2 * estimated_nunits - 3,
					  scalar_stmt, stmt_info, 0,
					  vect_epilogue);
	}
      else if (reduction_type == EXTRACT_LAST_REDUCTION
	       || reduction_type == FOLD_LEFT_REDUCTION)
	/* No extra instructions need in the epilogue.  */
	;
      else
	{
	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
	  tree bitsize =
	    TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
	  int element_bitsize = tree_to_uhwi (bitsize);
	  int nelements = vec_size_in_bits / element_bitsize;
	  if (code == COND_EXPR)
	    code = MAX_EXPR;
	  optab = optab_for_tree_code (code, vectype, optab_default);
	  /* We have a whole vector shift available.  */
	  if (optab != unknown_optab
	      && VECTOR_MODE_P (mode)
	      && optab_handler (optab, mode) != CODE_FOR_nothing
	      && have_whole_vector_shift (mode))
	    {
	      /* Final reduction via vector shifts and the reduction operator.
		 Also requires scalar extract.  */
	      epilogue_cost += add_stmt_cost (target_cost_data,
					      exact_log2 (nelements) * 2,
					      vector_stmt, stmt_info, 0,
					      vect_epilogue);
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	    }
	  else
	    /* Use extracts and reduction op for final reduction.  For N
	       elements, we have N extracts and N-1 reduction ops.  */
	    epilogue_cost += add_stmt_cost (target_cost_data,
					    nelements + nelements - 1,
					    vector_stmt, stmt_info, 0,
					    vect_epilogue);
	}
    }
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "vect_model_reduction_cost: inside_cost = %d, "
		 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
		 prologue_cost, epilogue_cost);
}
/* Function vect_model_induction_cost.

   Model the cost of vectorizing an induction, where NCOPIES vector
   statements are needed per scalar iteration.  Costs are recorded in
   the loop's target cost data; nothing is returned.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  void *cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  unsigned body_cost, prologue_cost;

  /* Inductions handled purely as part of an SLP instance are costed
     elsewhere.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* One vector statement per copy inside the loop body.  */
  body_cost = add_stmt_cost (cost_data, ncopies, vector_stmt,
			     stmt_info, 0, vect_body);

  /* The prologue broadcasts two scalars: the initial value and the
     step.  */
  prologue_cost = add_stmt_cost (cost_data, 2, scalar_to_vec,
				 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_induction_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", body_cost, prologue_cost);
}
/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
	of the reduction (used for adjusting the epilog - see below).

   Return a vector variable, initialized according to the operation that STMT
	performs.  This vector will be used as the initial value of the
	vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i=0;i<n;i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */

tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
			       tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  bool nested_in_vect_loop = false;
  /* Neutral scalar for float reductions; becomes 1.0 for MULT_EXPR.  */
  REAL_VALUE_TYPE real_init_val = dconst0;
  /* Neutral scalar for integer reductions; 1 for MULT_EXPR, -1 (all ones)
     for BIT_AND_EXPR, 0 for everything else.  */
  int int_init_val = 0;
  gimple *def_stmt = NULL;
  /* Any statements built below are inserted on the loop preheader edge
     at the end of the function.  */
  gimple_seq stmts = NULL;

  gcc_assert (vectype);
  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
	      || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
	  == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);

  /* In case of a nested reduction do not use an adjustment def as
     that case is not supported by the epilogue generation correctly
     if ncopies is not one.  */
  if (adjustment_def && nested_in_vect_loop)
    {
      *adjustment_def = NULL;
      return vect_get_vec_def_for_operand (init_val, stmt);
    }

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
      {
	/* ADJUSTMENT_DEF is NULL when called from
	   vect_create_epilog_for_reduction to vectorize double reduction.  */
	if (adjustment_def)
	  *adjustment_def = init_val;

	/* Pick the neutral element for the operation (see the variable
	   comments above).  */
	if (code == MULT_EXPR)
	  {
	    real_init_val = dconst1;
	    int_init_val = 1;
	  }

	if (code == BIT_AND_EXPR)
	  int_init_val = -1;

	if (SCALAR_FLOAT_TYPE_P (scalar_type))
	  def_for_init = build_real (scalar_type, real_init_val);
	else
	  def_for_init = build_int_cst (scalar_type, int_init_val);

	if (adjustment_def)
	  /* Option1: the first element is '0' or '1' as well.  */
	  init_def = gimple_build_vector_from_val (&stmts, vectype,
						   def_for_init);
	else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
	  {
	    /* Option2 (variable length): the first element is INIT_VAL.
	       Build a neutral-element splat, then shift INIT_VAL into
	       lane 0 with VEC_SHL_INSERT.  */
	    init_def = build_vector_from_val (vectype, def_for_init);
	    gcall *call = gimple_build_call_internal (IFN_VEC_SHL_INSERT,
						      2, init_def, init_val);
	    init_def = make_ssa_name (vectype);
	    gimple_call_set_lhs (call, init_def);
	    gimple_seq_add_stmt (&stmts, call);
	  }
	else
	  {
	    /* Option2: the first element is INIT_VAL.  The builder pattern
	       {init_val, def_for_init} repeats def_for_init to fill the
	       remaining lanes.  */
	    tree_vector_builder elts (vectype, 1, 2);
	    elts.quick_push (init_val);
	    elts.quick_push (def_for_init);
	    init_def = gimple_build_vector (&stmts, &elts);
	  }
      }
      break;

    case MIN_EXPR:
    case MAX_EXPR:
    case COND_EXPR:
      {
	if (adjustment_def)
	  {
	    *adjustment_def = NULL_TREE;
	    if (reduction_type != COND_REDUCTION
		&& reduction_type != EXTRACT_LAST_REDUCTION)
	      {
		init_def = vect_get_vec_def_for_operand (init_val, stmt);
		break;
	      }
	  }
	/* MIN/MAX/COND reductions splat INIT_VAL into every lane; no
	   epilogue adjustment is ever needed for them.  */
	init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
	init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Materialize any statements we queued up on the preheader edge, so
     the initial def dominates the loop.  */
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  return init_def;
}
/* Get at the initial defs for the reduction PHIs in SLP_NODE.
   NUMBER_OF_VECTORS is the number of vector defs to create.
   If NEUTRAL_OP is nonnull, introducing extra elements of that
   value will not change the result.

   The created defs are pushed onto VEC_OPRNDS.  REDUC_CHAIN is true
   for a reduction chain, in which case only one initial value is used
   (the rest of the lanes get NEUTRAL_OP).  */

static void
get_initial_defs_for_reduction (slp_tree slp_node,
				vec<tree> *vec_oprnds,
				unsigned int number_of_vectors,
				bool reduc_chain, tree neutral_op)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned HOST_WIDE_INT nunits;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  /* Temporary holder for the created defs; they are built in reverse
     order and inverted into VEC_OPRNDS at the end.  */
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  struct loop *loop;
  auto_vec<tree, 16> permute_results;

  vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);

  loop = (gimple_bb (stmt))->loop_father;
  gcc_assert (loop);
  edge pe = loop_preheader_edge (loop);

  gcc_assert (!reduc_chain || neutral_op);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  /* For variable-length vectors pretend each vector holds GROUP_SIZE
     elements; the neutral-op/duplicate paths below handle the rest.  */
  if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
    nunits = group_size;

  number_of_copies = nunits * number_of_vectors / group_size;

  number_of_places_left_in_vector = nunits;
  bool constant_p = true;
  tree_vector_builder elts (vector_type, nunits, 1);
  elts.quick_grow (nunits);
  for (j = 0; j < number_of_copies; j++)
    {
      /* Walk the group backwards so ELTS is filled from the highest
	 lane down; vectors therefore come out in reverse order too.  */
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
	{
	  tree op;
	  /* Get the def before the loop.  In reduction chain we have only
	     one initial value.  */
	  if ((j != (number_of_copies - 1)
	       || (reduc_chain && i != 0))
	      && neutral_op)
	    op = neutral_op;
	  else
	    op = PHI_ARG_DEF_FROM_EDGE (stmt, pe);

	  /* Create 'vect_ = {op0,op1,...,opn}'.  */
	  number_of_places_left_in_vector--;
	  elts[number_of_places_left_in_vector] = op;
	  if (!CONSTANT_CLASS_P (op))
	    constant_p = false;

	  if (number_of_places_left_in_vector == 0)
	    {
	      gimple_seq ctor_seq = NULL;
	      tree init;
	      if (constant_p && !neutral_op
		  ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
		  : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
		/* Build the vector directly from ELTS.  */
		init = gimple_build_vector (&ctor_seq, &elts);
	      else if (neutral_op)
		{
		  /* Build a vector of the neutral value and shift the
		     other elements into place.  */
		  init = gimple_build_vector_from_val (&ctor_seq, vector_type,
						       neutral_op);
		  int k = nunits;
		  /* Skip trailing lanes already equal to the neutral
		     value -- they need no insert.  */
		  while (k > 0 && elts[k - 1] == neutral_op)
		    k -= 1;
		  while (k > 0)
		    {
		      k -= 1;
		      gcall *call = gimple_build_call_internal
			(IFN_VEC_SHL_INSERT, 2, init, elts[k]);
		      init = make_ssa_name (vector_type);
		      gimple_call_set_lhs (call, init);
		      gimple_seq_add_stmt (&ctor_seq, call);
		    }
		}
	      else
		{
		  /* First time round, duplicate ELTS to fill the
		     required number of vectors, then cherry pick the
		     appropriate result for each iteration.  */
		  if (vec_oprnds->is_empty ())
		    duplicate_and_interleave (&ctor_seq, vector_type, elts,
					      number_of_vectors,
					      permute_results);
		  init = permute_results[number_of_vectors - j - 1];
		}
	      if (ctor_seq != NULL)
		gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
	      voprnds.quick_push (init);
	      /* Start a fresh builder for the next vector.  */
	      number_of_places_left_in_vector = nunits;
	      elts.new_vector (vector_type, nunits, 1);
	      elts.quick_grow (nunits);
	      constant_p = true;
	    }
	}
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  tree neutral_vec = NULL;
  while (number_of_vectors > vec_oprnds->length ())
    {
      if (neutral_op)
	{
	  /* Padding with the neutral value does not change the result;
	     build the splat once and reuse it.  */
	  if (!neutral_vec)
	    {
	      gimple_seq ctor_seq = NULL;
	      neutral_vec = gimple_build_vector_from_val
		(&ctor_seq, vector_type, neutral_op);
	      if (ctor_seq != NULL)
		gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
	    }
	  vec_oprnds->quick_push (neutral_vec);
	}
      else
	{
	  /* Otherwise replicate the defs we already have.  */
	  for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
	    vec_oprnds->quick_push (vop);
	}
    }
}
/* Function vect_create_epilog_for_reduction
Create code at the loop-epilog to finalize the result of a reduction
computation.
VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
reduction statements.
STMT is the scalar reduction stmt that is being vectorized.
NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
number of elements that we can fit in a vectype (nunits). In this case
we have to generate more than one vector stmt - i.e - we need to "unroll"
the vector stmt by a factor VF/nunits. For more details see documentation
in vectorizable_operation.
REDUC_FN is the internal function for the epilog reduction.
REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
computation.
REDUC_INDEX is the index of the operand in the right hand side of the
statement that is defined by REDUCTION_PHI.
DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
SLP_NODE is an SLP node containing a group of reduction statements. The
first one in this group is STMT.
INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
be smaller than any value of the IV in the loop, for MIN_EXPR larger than
any value of the IV in the loop.
INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
null if this is not an SLP reduction
This function:
1. Creates the reduction def-use cycles: sets the arguments for
REDUCTION_PHIS:
The loop-entry argument is the vectorized initial-value of the reduction.
The loop-latch argument is taken from VECT_DEFS - the vector of partial
sums.
2. "Reduces" each vector of partial results VECT_DEFS into a single result,
by calling the function specified by REDUC_FN if available, or by
other means (whole-vector shifts or a scalar loop).
The function also creates a new phi node at the loop exit to preserve
loop-closed form, as illustrated below.
The flow at the entry to this function:
loop:
vec_def = phi <null, null> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
s_loop = scalar_stmt # (scalar) STMT
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
use <s_out0>
use <s_out0>
The above is transformed by this function into:
loop:
vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
s_loop = scalar_stmt # (scalar) STMT
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1>
s_out3 = extract_field <v_out2, 0>
s_out4 = adjust_result <s_out3>
use <s_out4>
use <s_out4>
*/
static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
gimple *reduc_def_stmt,
int ncopies, internal_fn reduc_fn,
vec<gimple *> reduction_phis,
bool double_reduc,
slp_tree slp_node,
slp_instance slp_node_instance,
tree induc_val, enum tree_code induc_code,
tree neutral_op)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
stmt_vec_info prev_phi_info;
tree vectype;
machine_mode mode;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
basic_block exit_bb;
tree scalar_dest;
tree scalar_type;
gimple *new_phi = NULL, *phi;
gimple_stmt_iterator exit_gsi;
tree vec_dest;
tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
gimple *epilog_stmt = NULL;
enum tree_code code = gimple_assign_rhs_code (stmt);
gimple *exit_phi;
tree bitsize;
tree adjustment_def = NULL;
tree vec_initial_def = NULL;
tree expr, def, initial_def = NULL;
tree orig_name, scalar_result;
imm_use_iterator imm_iter, phi_imm_iter;
use_operand_p use_p, phi_use_p;
gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
bool nested_in_vect_loop = false;
auto_vec<gimple *> new_phis;
auto_vec<gimple *> inner_phis;
enum vect_def_type dt = vect_unknown_def_type;
int j, i;
auto_vec<tree> scalar_results;
unsigned int group_size = 1, k, ratio;
auto_vec<tree> vec_initial_defs;
auto_vec<gimple *> phis;
bool slp_reduc = false;
bool direct_slp_reduc;
tree new_phi_result;
gimple *inner_phi = NULL;
tree induction_index = NULL_TREE;
if (slp_node)
group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
if (nested_in_vect_loop_p (loop, stmt))
{
outer_loop = loop;
loop = loop->inner;
nested_in_vect_loop = true;
gcc_assert (!slp_node);
}
vectype = STMT_VINFO_VECTYPE (stmt_info);
gcc_assert (vectype);
mode = TYPE_MODE (vectype);
/* 1. Create the reduction def-use cycle:
Set the arguments of REDUCTION_PHIS, i.e., transform
loop:
vec_def = phi <null, null> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
...
into:
loop:
vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
...
(in case of SLP, do it for all the phis). */
/* Get the loop-entry arguments. */
enum vect_def_type initial_def_dt = vect_unknown_def_type;
if (slp_node)
{
unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
vec_initial_defs.reserve (vec_num);
get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
&vec_initial_defs, vec_num,
GROUP_FIRST_ELEMENT (stmt_info),
neutral_op);
}
else
{
/* Get at the scalar def before the loop, that defines the initial value
of the reduction variable. */
gimple *def_stmt;
initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
loop_preheader_edge (loop));
/* Optimize: if initial_def is for REDUC_MAX smaller than the base
and we can't use zero for induc_val, use initial_def. Similarly
for REDUC_MIN and initial_def larger than the base. */
if (TREE_CODE (initial_def) == INTEGER_CST
&& (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
&& !integer_zerop (induc_val)
&& ((induc_code == MAX_EXPR
&& tree_int_cst_lt (initial_def, induc_val))
|| (induc_code == MIN_EXPR
&& tree_int_cst_lt (induc_val, initial_def))))
induc_val = initial_def;
vect_is_simple_use (initial_def, loop_vinfo, &def_stmt, &initial_def_dt);
vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
&adjustment_def);
vec_initial_defs.create (1);
vec_initial_defs.quick_push (vec_initial_def);
}
/* Set phi nodes arguments. */
FOR_EACH_VEC_ELT (reduction_phis, i, phi)
{
tree vec_init_def = vec_initial_defs[i];
tree def = vect_defs[i];
for (j = 0; j < ncopies; j++)
{
if (j != 0)
{
phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
if (nested_in_vect_loop)
vec_init_def
= vect_get_vec_def_for_stmt_copy (initial_def_dt,
vec_init_def);
}
/* Set the loop-entry arg of the reduction-phi. */
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
{
/* Initialise the reduction phi to zero. This prevents initial
values of non-zero interferring with the reduction op. */
gcc_assert (ncopies == 1);
gcc_assert (i == 0);
tree vec_init_def_type = TREE_TYPE (vec_init_def);
tree induc_val_vec
= build_vector_from_val (vec_init_def_type, induc_val);
add_phi_arg (as_a <gphi *> (phi), induc_val_vec,
loop_preheader_edge (loop), UNKNOWN_LOCATION);
}
else
add_phi_arg (as_a <gphi *> (phi), vec_init_def,
loop_preheader_edge (loop), UNKNOWN_LOCATION);
/* Set the loop-latch arg for the reduction-phi. */
if (j > 0)
def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
UNKNOWN_LOCATION);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"transform reduction: created def-use cycle: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
}
}
}
/* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
which is updated with the current index of the loop for every match of
the original loop's cond_expr (VEC_STMT). This results in a vector
containing the last time the condition passed for that vector lane.
The first match will be a 1 to allow 0 to be used for non-matching
indexes. If there are no matches at all then the vector will be all
zeroes. */
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
{
tree indx_before_incr, indx_after_incr;
poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
int scalar_precision
= GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
tree cr_index_vector_type = build_vector_type
(cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
/* First we create a simple vector induction variable which starts
with the values {1,2,3,...} (SERIES_VECT) and increments by the
vector size (STEP). */
/* Create a {1,2,3,...} vector. */
tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
/* Create a vector of the step value. */
tree step = build_int_cst (cr_index_scalar_type, nunits_out);
tree vec_step = build_vector_from_val (cr_index_vector_type, step);
/* Create an induction variable. */
gimple_stmt_iterator incr_gsi;
bool insert_after;
standard_iv_increment_position (loop, &incr_gsi, &insert_after);
create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
insert_after, &indx_before_incr, &indx_after_incr);
/* Next create a new phi node vector (NEW_PHI_TREE) which starts
filled with zeros (VEC_ZERO). */
/* Create a vector of 0s. */
tree zero = build_zero_cst (cr_index_scalar_type);
tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
/* Create a vector phi node. */
tree new_phi_tree = make_ssa_name (cr_index_vector_type);
new_phi = create_phi_node (new_phi_tree, loop->header);
set_vinfo_for_stmt (new_phi,
new_stmt_vec_info (new_phi, loop_vinfo));
add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
loop_preheader_edge (loop), UNKNOWN_LOCATION);
/* Now take the condition from the loops original cond_expr
(VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
every match uses values from the induction variable
(INDEX_BEFORE_INCR) otherwise uses values from the phi node
(NEW_PHI_TREE).
Finally, we update the phi (NEW_PHI_TREE) to take the value of
the new cond_expr (INDEX_COND_EXPR). */
/* Duplicate the condition from vec_stmt. */
tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
/* Create a conditional, where the condition is taken from vec_stmt
(CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
else is the phi (NEW_PHI_TREE). */
tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
ccompare, indx_before_incr,
new_phi_tree);
induction_index = make_ssa_name (cr_index_vector_type);
gimple *index_condition = gimple_build_assign (induction_index,
index_cond_expr);
gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
loop_vinfo);
STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
set_vinfo_for_stmt (index_condition, index_vec_info);
/* Update the phi with the vec cond. */
add_phi_arg (as_a <gphi *> (new_phi), induction_index,
loop_latch_edge (loop), UNKNOWN_LOCATION);
}
/* 2. Create epilog code.
The reduction epilog code operates across the elements of the vector
of partial results computed by the vectorized loop.
The reduction epilog code consists of:
step 1: compute the scalar result in a vector (v_out2)
step 2: extract the scalar result (s_out3) from the vector (v_out2)
step 3: adjust the scalar result (s_out3) if needed.
Step 1 can be accomplished using one the following three schemes:
(scheme 1) using reduc_fn, if available.
(scheme 2) using whole-vector shifts, if available.
(scheme 3) using a scalar loop. In this case steps 1+2 above are
combined.
The overall epilog code looks like this:
s_out0 = phi <s_loop> # original EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1> # step 1
s_out3 = extract_field <v_out2, 0> # step 2
s_out4 = adjust_result <s_out3> # step 3
(step 3 is optional, and steps 1 and 2 may be combined).
Lastly, the uses of s_out0 are replaced by s_out4. */
/* 2.1 Create new loop-exit-phis to preserve loop-closed form:
v_out1 = phi <VECT_DEF>
Store them in NEW_PHIS. */
exit_bb = single_exit (loop)->dest;
prev_phi_info = NULL;
new_phis.create (vect_defs.length ());
FOR_EACH_VEC_ELT (vect_defs, i, def)
{
for (j = 0; j < ncopies; j++)
{
tree new_def = copy_ssa_name (def);
phi = create_phi_node (new_def, exit_bb);
set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
if (j == 0)
new_phis.quick_push (phi);
else
{
def = vect_get_vec_def_for_stmt_copy (dt, def);
STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
}
SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
prev_phi_info = vinfo_for_stmt (phi);
}
}
/* The epilogue is created for the outer-loop, i.e., for the loop being
vectorized. Create exit phis for the outer loop. */
if (double_reduc)
{
loop = outer_loop;
exit_bb = single_exit (loop)->dest;
inner_phis.create (vect_defs.length ());
FOR_EACH_VEC_ELT (new_phis, i, phi)
{
tree new_result = copy_ssa_name (PHI_RESULT (phi));
gphi *outer_phi = create_phi_node (new_result, exit_bb);
SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
PHI_RESULT (phi));
set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
loop_vinfo));
inner_phis.quick_push (phi);
new_phis[i] = outer_phi;
prev_phi_info = vinfo_for_stmt (outer_phi);
while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
{
phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
new_result = copy_ssa_name (PHI_RESULT (phi));
outer_phi = create_phi_node (new_result, exit_bb);
SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
PHI_RESULT (phi));
set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
loop_vinfo));
STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
prev_phi_info = vinfo_for_stmt (outer_phi);
}
}
}
exit_gsi = gsi_after_labels (exit_bb);
/* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
(i.e. when reduc_fn is not available) and in the final adjustment
code (if needed). Also get the original scalar reduction variable as
defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
represents a reduction pattern), the tree-code and scalar-def are
taken from the original stmt that the pattern-stmt (STMT) replaces.
Otherwise (it is a regular reduction) - the tree-code and scalar-def
are taken from STMT. */
orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
if (!orig_stmt)
{
/* Regular reduction */
orig_stmt = stmt;
}
else
{
/* Reduction pattern */
stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
}
code = gimple_assign_rhs_code (orig_stmt);
/* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
partial results are added and not subtracted. */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
scalar_dest = gimple_assign_lhs (orig_stmt);
scalar_type = TREE_TYPE (scalar_dest);
scalar_results.create (group_size);
new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
bitsize = TYPE_SIZE (scalar_type);
/* In case this is a reduction in an inner-loop while vectorizing an outer
loop - we don't need to extract a single scalar result at the end of the
inner-loop (unless it is double reduction, i.e., the use of reduction is
outside the outer-loop). The final vector of partial results will be used
in the vectorized outer-loop, or reduced to a scalar result at the end of
the outer-loop. */
if (nested_in_vect_loop && !double_reduc)
goto vect_finalize_reduction;
/* SLP reduction without reduction chain, e.g.,
# a1 = phi <a2, a0>
# b1 = phi <b2, b0>
a2 = operation (a1)
b2 = operation (b1) */
slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
/* True if we should implement SLP_REDUC using native reduction operations
instead of scalar operations. */
direct_slp_reduc = (reduc_fn != IFN_LAST
&& slp_reduc
&& !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
/* In case of reduction chain, e.g.,
# a1 = phi <a3, a0>
a2 = operation (a1)
a3 = operation (a2),
we may end up with more than one vector result. Here we reduce them to
one vector. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
{
tree first_vect = PHI_RESULT (new_phis[0]);
gassign *new_vec_stmt = NULL;
vec_dest = vect_create_destination_var (scalar_dest, vectype);
for (k = 1; k < new_phis.length (); k++)
{
gimple *next_phi = new_phis[k];
tree second_vect = PHI_RESULT (next_phi);
tree tem = make_ssa_name (vec_dest, new_vec_stmt);
new_vec_stmt = gimple_build_assign (tem, code,
first_vect, second_vect);
gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
first_vect = tem;
}
new_phi_result = first_vect;
if (new_vec_stmt)
{
new_phis.truncate (0);
new_phis.safe_push (new_vec_stmt);
}
}
/* Likewise if we couldn't use a single defuse cycle. */
else if (ncopies > 1)
{
gcc_assert (new_phis.length () == 1);
tree first_vect = PHI_RESULT (new_phis[0]);
gassign *new_vec_stmt = NULL;
vec_dest = vect_create_destination_var (scalar_dest, vectype);
gimple *next_phi = new_phis[0];
for (int k = 1; k < ncopies; ++k)
{
next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi));
tree second_vect = PHI_RESULT (next_phi);
tree tem = make_ssa_name (vec_dest, new_vec_stmt);
new_vec_stmt = gimple_build_assign (tem, code,
first_vect, second_vect);
gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
first_vect = tem;
}
new_phi_result = first_vect;
new_phis.truncate (0);
new_phis.safe_push (new_vec_stmt);
}
else
new_phi_result = PHI_RESULT (new_phis[0]);
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
&& reduc_fn != IFN_LAST)
{
/* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
various data values where the condition matched and another vector
(INDUCTION_INDEX) containing all the indexes of those matches. We
need to extract the last matching index (which will be the index with
highest value) and use this to index into the data vector.
For the case where there were no matches, the data vector will contain
all default values and the index vector will be all zeros. */
/* Get various versions of the type of the vector of indexes. */
tree index_vec_type = TREE_TYPE (induction_index);
gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
tree index_scalar_type = TREE_TYPE (index_vec_type);
tree index_vec_cmp_type = build_same_sized_truth_vector_type
(index_vec_type);
/* Get an unsigned integer version of the type of the data vector. */
int scalar_precision
= GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
tree vectype_unsigned = build_vector_type
(scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
/* First we need to create a vector (ZERO_VEC) of zeros and another
vector (MAX_INDEX_VEC) filled with the last matching index, which we
can create using a MAX reduction and then expanding.
In the case where the loop never made any matches, the max index will
be zero. */
/* Vector of {0, 0, 0,...}. */
tree zero_vec = make_ssa_name (vectype);
tree zero_vec_rhs = build_zero_cst (vectype);
gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
/* Find maximum value from the vector of found indexes. */
tree max_index = make_ssa_name (index_scalar_type);
gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
1, induction_index);
gimple_call_set_lhs (max_index_stmt, max_index);
gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
/* Vector of {max_index, max_index, max_index,...}. */
tree max_index_vec = make_ssa_name (index_vec_type);
tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
max_index);
gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
max_index_vec_rhs);
gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
/* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
with the vector (INDUCTION_INDEX) of found indexes, choosing values
from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
otherwise. Only one value should match, resulting in a vector
(VEC_COND) with one data value and the rest zeros.
In the case where the loop never made any matches, every index will
match, resulting in a vector with all data values (which will all be
the default value). */
/* Compare the max index vector to the vector of found indexes to find
the position of the max value. */
tree vec_compare = make_ssa_name (index_vec_cmp_type);
gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
induction_index,
max_index_vec);
gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
/* Use the compare to choose either values from the data vector or
zero. */
tree vec_cond = make_ssa_name (vectype);
gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
vec_compare, new_phi_result,
zero_vec);
gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
/* Finally we need to extract the data value from the vector (VEC_COND)
into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
reduction, but because this doesn't exist, we can use a MAX reduction
instead. The data value might be signed or a float so we need to cast
it first.
In the case where the loop never made any matches, the data values are
all identical, and so will reduce down correctly. */
/* Make the matched data values unsigned. */
tree vec_cond_cast = make_ssa_name (vectype_unsigned);
tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
vec_cond);
gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
VIEW_CONVERT_EXPR,
vec_cond_cast_rhs);
gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
/* Reduce down to a scalar value. */
tree data_reduc = make_ssa_name (scalar_type_unsigned);
gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
1, vec_cond_cast);
gimple_call_set_lhs (data_reduc_stmt, data_reduc);
gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
/* Convert the reduced value back to the result type and set as the
result. */
gimple_seq stmts = NULL;
new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
data_reduc);
gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
scalar_results.safe_push (new_temp);
}
else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
&& reduc_fn == IFN_LAST)
{
/* Condition reduction without supported IFN_REDUC_MAX. Generate
idx = 0;
idx_val = induction_index[0];
val = data_reduc[0];
for (idx = 0, val = init, i = 0; i < nelts; ++i)
if (induction_index[i] > idx_val)
val = data_reduc[i], idx_val = induction_index[i];
return val; */
tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
/* Enforced by vectorizable_reduction, which ensures we have target
support before allowing a conditional reduction on variable-length
vectors. */
unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
tree idx_val = NULL_TREE, val = NULL_TREE;
for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
{
tree old_idx_val = idx_val;
tree old_val = val;
idx_val = make_ssa_name (idx_eltype);
epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, idx_eltype,
induction_index,
bitsize_int (el_size),
bitsize_int (off)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
val = make_ssa_name (data_eltype);
epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
build3 (BIT_FIELD_REF,
data_eltype,
new_phi_result,
bitsize_int (el_size),
bitsize_int (off)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if (off != 0)
{
tree new_idx_val = idx_val;
tree new_val = val;
if (off != v_size - el_size)
{
new_idx_val = make_ssa_name (idx_eltype);
epilog_stmt = gimple_build_assign (new_idx_val,
MAX_EXPR, idx_val,
old_idx_val);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
new_val = make_ssa_name (data_eltype);
epilog_stmt = gimple_build_assign (new_val,
COND_EXPR,
build2 (GT_EXPR,
boolean_type_node,
idx_val,
old_idx_val),
val, old_val);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
idx_val = new_idx_val;
val = new_val;
}
}
/* Convert the reduced value back to the result type and set as the
result. */
gimple_seq stmts = NULL;
val = gimple_convert (&stmts, scalar_type, val);
gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
scalar_results.safe_push (val);
}
/* 2.3 Create the reduction code, using one of the three schemes described
above. In SLP we simply need to extract all the elements from the
vector (without reducing them), so we use scalar shifts. */
else if (reduc_fn != IFN_LAST && !slp_reduc)
{
tree tmp;
tree vec_elem_type;
/* Case 1: Create:
v_out2 = reduc_expr <v_out1> */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using direct vector reduction.\n");
vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
if (!useless_type_conversion_p (scalar_type, vec_elem_type))
{
tree tmp_dest
= vect_create_destination_var (scalar_dest, vec_elem_type);
epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
new_phi_result);
gimple_set_lhs (epilog_stmt, tmp_dest);
new_temp = make_ssa_name (tmp_dest, epilog_stmt);
gimple_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
new_temp);
}
else
{
epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
new_phi_result);
gimple_set_lhs (epilog_stmt, new_scalar_dest);
}
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
&& !operand_equal_p (initial_def, induc_val, 0))
{
/* Earlier we set the initial value to be a vector of induc_val
values. Check the result and if it is induc_val then replace
with the original initial value, unless induc_val is
the same as initial_def already. */
tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
induc_val);
tmp = make_ssa_name (new_scalar_dest);
epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
initial_def, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
new_temp = tmp;
}
scalar_results.safe_push (new_temp);
}
else if (direct_slp_reduc)
{
/* Here we create one vector for each of the GROUP_SIZE results,
with the elements for other SLP statements replaced with the
neutral value. We can then do a normal reduction on each vector. */
/* Enforced by vectorizable_reduction. */
gcc_assert (new_phis.length () == 1);
gcc_assert (pow2p_hwi (group_size));
slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
gimple_seq seq = NULL;
/* Build a vector {0, 1, 2, ...}, with the same number of elements
and the same element size as VECTYPE. */
tree index = build_index_vector (vectype, 0, 1);
tree index_type = TREE_TYPE (index);
tree index_elt_type = TREE_TYPE (index_type);
tree mask_type = build_same_sized_truth_vector_type (index_type);
/* Create a vector that, for each element, identifies which of
the GROUP_SIZE results should use it. */
tree index_mask = build_int_cst (index_elt_type, group_size - 1);
index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
build_vector_from_val (index_type, index_mask));
/* Get a neutral vector value. This is simply a splat of the neutral
scalar value if we have one, otherwise the initial scalar value
is itself a neutral value. */
tree vector_identity = NULL_TREE;
if (neutral_op)
vector_identity = gimple_build_vector_from_val (&seq, vectype,
neutral_op);
for (unsigned int i = 0; i < group_size; ++i)
{
/* If there's no universal neutral value, we can use the
initial scalar value from the original PHI. This is used
for MIN and MAX reduction, for example. */
if (!neutral_op)
{
tree scalar_value
= PHI_ARG_DEF_FROM_EDGE (orig_phis[i],
loop_preheader_edge (loop));
vector_identity = gimple_build_vector_from_val (&seq, vectype,
scalar_value);
}
/* Calculate the equivalent of:
sel[j] = (index[j] == i);
which selects the elements of NEW_PHI_RESULT that should
be included in the result. */
tree compare_val = build_int_cst (index_elt_type, i);
compare_val = build_vector_from_val (index_type, compare_val);
tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
index, compare_val);
/* Calculate the equivalent of:
vec = seq ? new_phi_result : vector_identity;
VEC is now suitable for a full vector reduction. */
tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
sel, new_phi_result, vector_identity);
/* Do the reduction and convert it to the appropriate type. */
gcall *call = gimple_build_call_internal (reduc_fn, 1, vec);
tree scalar = make_ssa_name (TREE_TYPE (vectype));
gimple_call_set_lhs (call, scalar);
gimple_seq_add_stmt (&seq, call);
scalar = gimple_convert (&seq, scalar_type, scalar);
scalar_results.safe_push (scalar);
}
gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
}
else
{
bool reduce_with_shift;
tree vec_temp;
/* COND reductions all do the final reduction with MAX_EXPR
or MIN_EXPR. */
if (code == COND_EXPR)
{
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
code = induc_code;
else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== CONST_COND_REDUCTION)
code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
else
code = MAX_EXPR;
}
/* See if the target wants to do the final (shift) reduction
in a vector mode of smaller size and first reduce upper/lower
halves against each other. */
enum machine_mode mode1 = mode;
tree vectype1 = vectype;
unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
unsigned sz1 = sz;
if (!slp_reduc
&& (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
sz1 = GET_MODE_SIZE (mode1).to_constant ();
vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
reduce_with_shift = have_whole_vector_shift (mode1);
if (!VECTOR_MODE_P (mode1))
reduce_with_shift = false;
else
{
optab optab = optab_for_tree_code (code, vectype1, optab_default);
if (optab_handler (optab, mode1) == CODE_FOR_nothing)
reduce_with_shift = false;
}
/* First reduce the vector to the desired vector size we should
do shift reduction on by combining upper and lower halves. */
new_temp = new_phi_result;
while (sz > sz1)
{
gcc_assert (!slp_reduc);
sz /= 2;
vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
/* The target has to make sure we support lowpart/highpart
extraction, either via direct vector extract or through
an integer mode punning. */
tree dst1, dst2;
if (convert_optab_handler (vec_extract_optab,
TYPE_MODE (TREE_TYPE (new_temp)),
TYPE_MODE (vectype1))
!= CODE_FOR_nothing)
{
/* Extract sub-vectors directly once vec_extract becomes
a conversion optab. */
dst1 = make_ssa_name (vectype1);
epilog_stmt
= gimple_build_assign (dst1, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, vectype1,
new_temp, TYPE_SIZE (vectype1),
bitsize_int (0)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
dst2 = make_ssa_name (vectype1);
epilog_stmt
= gimple_build_assign (dst2, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, vectype1,
new_temp, TYPE_SIZE (vectype1),
bitsize_int (sz * BITS_PER_UNIT)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
else
{
/* Extract via punning to appropriately sized integer mode
vector. */
tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
1);
tree etype = build_vector_type (eltype, 2);
gcc_assert (convert_optab_handler (vec_extract_optab,
TYPE_MODE (etype),
TYPE_MODE (eltype))
!= CODE_FOR_nothing);
tree tem = make_ssa_name (etype);
epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
build1 (VIEW_CONVERT_EXPR,
etype, new_temp));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
new_temp = tem;
tem = make_ssa_name (eltype);
epilog_stmt
= gimple_build_assign (tem, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, eltype,
new_temp, TYPE_SIZE (eltype),
bitsize_int (0)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
dst1 = make_ssa_name (vectype1);
epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
build1 (VIEW_CONVERT_EXPR,
vectype1, tem));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
tem = make_ssa_name (eltype);
epilog_stmt
= gimple_build_assign (tem, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, eltype,
new_temp, TYPE_SIZE (eltype),
bitsize_int (sz * BITS_PER_UNIT)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
dst2 = make_ssa_name (vectype1);
epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
build1 (VIEW_CONVERT_EXPR,
vectype1, tem));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
new_temp = make_ssa_name (vectype1);
epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
if (reduce_with_shift && !slp_reduc)
{
int element_bitsize = tree_to_uhwi (bitsize);
/* Enforced by vectorizable_reduction, which disallows SLP reductions
for variable-length vectors and also requires direct target support
for loop reductions. */
int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
int nelements = vec_size_in_bits / element_bitsize;
vec_perm_builder sel;
vec_perm_indices indices;
int elt_offset;
tree zero_vec = build_zero_cst (vectype1);
/* Case 2: Create:
for (offset = nelements/2; offset >= 1; offset/=2)
{
Create: va' = vec_shift <va, offset>
Create: va = vop <va, va'>
} */
tree rhs;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using vector shifts\n");
mode1 = TYPE_MODE (vectype1);
vec_dest = vect_create_destination_var (scalar_dest, vectype1);
for (elt_offset = nelements / 2;
elt_offset >= 1;
elt_offset /= 2)
{
calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
indices.new_vector (sel, 2, nelements);
tree mask = vect_gen_perm_mask_any (vectype1, indices);
epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
new_temp, zero_vec, mask);
new_name = make_ssa_name (vec_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_name);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
new_temp);
new_temp = make_ssa_name (vec_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
/* 2.4 Extract the final scalar result. Create:
s_out3 = extract_field <v_out2, bitpos> */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"extract scalar result\n");
rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
bitsize, bitsize_zero_node);
epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
scalar_results.safe_push (new_temp);
}
else
{
/* Case 3: Create:
s = extract_field <v_out2, 0>
for (offset = element_size;
offset < vector_size;
offset += element_size;)
{
Create: s' = extract_field <v_out2, offset>
Create: s = op <s, s'> // For non SLP cases
} */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using scalar code.\n");
int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
int element_bitsize = tree_to_uhwi (bitsize);
FOR_EACH_VEC_ELT (new_phis, i, new_phi)
{
int bit_offset;
if (gimple_code (new_phi) == GIMPLE_PHI)
vec_temp = PHI_RESULT (new_phi);
else
vec_temp = gimple_assign_lhs (new_phi);
tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
bitsize_zero_node);
epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
/* In SLP we don't need to apply reduction operation, so we just
collect s' values in SCALAR_RESULTS. */
if (slp_reduc)
scalar_results.safe_push (new_temp);
for (bit_offset = element_bitsize;
bit_offset < vec_size_in_bits;
bit_offset += element_bitsize)
{
tree bitpos = bitsize_int (bit_offset);
tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
bitsize, bitpos);
epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_name);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if (slp_reduc)
{
/* In SLP we don't need to apply reduction operation, so
we just collect s' values in SCALAR_RESULTS. */
new_temp = new_name;
scalar_results.safe_push (new_name);
}
else
{
epilog_stmt = gimple_build_assign (new_scalar_dest, code,
new_name, new_temp);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
}
}
/* The only case where we need to reduce scalar results in SLP, is
unrolling. If the size of SCALAR_RESULTS is greater than
GROUP_SIZE, we reduce them combining elements modulo
GROUP_SIZE. */
if (slp_reduc)
{
tree res, first_res, new_res;
gimple *new_stmt;
/* Reduce multiple scalar results in case of SLP unrolling. */
for (j = group_size; scalar_results.iterate (j, &res);
j++)
{
first_res = scalar_results[j % group_size];
new_stmt = gimple_build_assign (new_scalar_dest, code,
first_res, res);
new_res = make_ssa_name (new_scalar_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_res);
gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
scalar_results[j % group_size] = new_res;
}
}
else
/* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
scalar_results.safe_push (new_temp);
}
if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
&& !operand_equal_p (initial_def, induc_val, 0))
{
/* Earlier we set the initial value to be a vector of induc_val
values. Check the result and if it is induc_val then replace
with the original initial value, unless induc_val is
the same as initial_def already. */
tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
induc_val);
tree tmp = make_ssa_name (new_scalar_dest);
epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
initial_def, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
scalar_results[0] = tmp;
}
}
vect_finalize_reduction:
if (double_reduc)
loop = loop->inner;
/* 2.5 Adjust the final result by the initial value of the reduction
variable. (When such adjustment is not needed, then
'adjustment_def' is zero). For example, if code is PLUS we create:
new_temp = loop_exit_def + adjustment_def */
if (adjustment_def)
{
gcc_assert (!slp_reduc);
if (nested_in_vect_loop)
{
new_phi = new_phis[0];
gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, vectype);
}
else
{
new_temp = scalar_results[0];
gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
expr = build2 (code, scalar_type, new_temp, adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, scalar_type);
}
epilog_stmt = gimple_build_assign (new_dest, expr);
new_temp = make_ssa_name (new_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if (nested_in_vect_loop)
{
set_vinfo_for_stmt (epilog_stmt,
new_stmt_vec_info (epilog_stmt, loop_vinfo));
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
if (!double_reduc)
scalar_results.quick_push (new_temp);
else
scalar_results[0] = new_temp;
}
else
scalar_results[0] = new_temp;
new_phis[0] = epilog_stmt;
}
/* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
phis with new adjusted scalar results, i.e., replace use <s_out0>
with use <s_out4>.
Transform:
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1>
s_out3 = extract_field <v_out2, 0>
s_out4 = adjust_result <s_out3>
use <s_out0>
use <s_out0>
into:
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1>
s_out3 = extract_field <v_out2, 0>
s_out4 = adjust_result <s_out3>
use <s_out4>
use <s_out4> */
/* In SLP reduction chain we reduce vector results into one vector if
necessary, hence we set here GROUP_SIZE to 1. SCALAR_DEST is the LHS of
the last stmt in the reduction chain, since we are looking for the loop
exit phi node. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
/* Handle reduction patterns. */
if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));
scalar_dest = gimple_assign_lhs (dest_stmt);
group_size = 1;
}
/* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
case that GROUP_SIZE is greater than vectorization factor). Therefore, we
need to match SCALAR_RESULTS with corresponding statements. The first
(GROUP_SIZE / number of new vector stmts) scalar results correspond to
the first vector stmt, etc.
(RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
if (group_size > new_phis.length ())
{
ratio = group_size / new_phis.length ();
gcc_assert (!(group_size % new_phis.length ()));
}
else
ratio = 1;
for (k = 0; k < group_size; k++)
{
if (k % ratio == 0)
{
epilog_stmt = new_phis[k / ratio];
reduction_phi = reduction_phis[k / ratio];
if (double_reduc)
inner_phi = inner_phis[k / ratio];
}
if (slp_reduc)
{
gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
/* SLP statements can't participate in patterns. */
gcc_assert (!orig_stmt);
scalar_dest = gimple_assign_lhs (current_stmt);
}
phis.create (3);
/* Find the loop-closed-use at the loop exit of the original scalar
result. (The reduction result is expected to have two immediate uses -
one at the latch block, and one at the loop exit). */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
&& !is_gimple_debug (USE_STMT (use_p)))
phis.safe_push (USE_STMT (use_p));
/* While we expect to have found an exit_phi because of loop-closed-ssa
form we can end up without one if the scalar cycle is dead. */
FOR_EACH_VEC_ELT (phis, i, exit_phi)
{
if (outer_loop)
{
stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
gphi *vect_phi;
/* FORNOW. Currently not supporting the case that an inner-loop
reduction is not used in the outer-loop (but only outside the
outer-loop), unless it is double reduction. */
gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
&& !STMT_VINFO_LIVE_P (exit_phi_vinfo))
|| double_reduc);
if (double_reduc)
STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
else
STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
if (!double_reduc
|| STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
!= vect_double_reduction_def)
continue;
/* Handle double reduction:
stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
At that point the regular reduction (stmt2 and stmt3) is
already vectorized, as well as the exit phi node, stmt4.
Here we vectorize the phi node of double reduction, stmt1, and
update all relevant statements. */
/* Go through all the uses of s2 to find double reduction phi
node, i.e., stmt1 above. */
orig_name = PHI_RESULT (exit_phi);
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
{
stmt_vec_info use_stmt_vinfo;
stmt_vec_info new_phi_vinfo;
tree vect_phi_init, preheader_arg, vect_phi_res;
basic_block bb = gimple_bb (use_stmt);
gimple *use;
/* Check that USE_STMT is really double reduction phi
node. */
if (gimple_code (use_stmt) != GIMPLE_PHI
|| gimple_phi_num_args (use_stmt) != 2
|| bb->loop_father != outer_loop)
continue;
use_stmt_vinfo = vinfo_for_stmt (use_stmt);
if (!use_stmt_vinfo
|| STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
!= vect_double_reduction_def)
continue;
/* Create vector phi node for double reduction:
vs1 = phi <vs0, vs2>
vs1 was created previously in this function by a call to
vect_get_vec_def_for_operand and is stored in
vec_initial_def;
vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
vs0 is created here. */
/* Create vector phi node. */
vect_phi = create_phi_node (vec_initial_def, bb);
new_phi_vinfo = new_stmt_vec_info (vect_phi,
loop_vec_info_for_loop (outer_loop));
set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
/* Create vs0 - initial def of the double reduction phi. */
preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
loop_preheader_edge (outer_loop));
vect_phi_init = get_initial_def_for_reduction
(stmt, preheader_arg, NULL);
/* Update phi node arguments with vs0 and vs2. */
add_phi_arg (vect_phi, vect_phi_init,
loop_preheader_edge (outer_loop),
UNKNOWN_LOCATION);
add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"created double reduction phi node: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
}
vect_phi_res = PHI_RESULT (vect_phi);
/* Replace the use, i.e., set the correct vs1 in the regular
reduction phi node. FORNOW, NCOPIES is always 1, so the
loop is redundant. */
use = reduction_phi;
for (j = 0; j < ncopies; j++)
{
edge pr_edge = loop_preheader_edge (loop);
SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
}
}
}
}
phis.release ();
if (nested_in_vect_loop)
{
if (double_reduc)
loop = outer_loop;
else
continue;
}
phis.create (3);
/* Find the loop-closed-use at the loop exit of the original scalar
result. (The reduction result is expected to have two immediate uses,
one at the latch block, and one at the loop exit). For double
reductions we are looking for exit phis of the outer loop. */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
{
if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
{
if (!is_gimple_debug (USE_STMT (use_p)))
phis.safe_push (USE_STMT (use_p));
}
else
{
if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
{
tree phi_res = PHI_RESULT (USE_STMT (use_p));
FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
{
if (!flow_bb_inside_loop_p (loop,
gimple_bb (USE_STMT (phi_use_p)))
&& !is_gimple_debug (USE_STMT (phi_use_p)))
phis.safe_push (USE_STMT (phi_use_p));
}
}
}
}
FOR_EACH_VEC_ELT (phis, i, exit_phi)
{
/* Replace the uses: */
orig_name = PHI_RESULT (exit_phi);
scalar_result = scalar_results[k];
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
SET_USE (use_p, scalar_result);
}
phis.release ();
}
}
/* Build and insert, before GSI, a VEC_COND_EXPR of type VECTYPE that
   computes the vector select "MASK ? VEC : IDENTITY".  Return the SSA
   name that holds the result of the select.  */
static tree
merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
		     tree vec, tree identity)
{
  tree sel_res = make_temp_ssa_name (vectype, NULL, "cond");
  gimple *sel_stmt
    = gimple_build_assign (sel_res, VEC_COND_EXPR, mask, vec, identity);
  gsi_insert_before (gsi, sel_stmt, GSI_SAME_STMT);
  return sel_res;
}
/* Open-code an in-order (fold-left) reduction: successively apply CODE
   to each element of VECTOR_RHS, in left-to-right order, starting with
   the scalar LHS.  The extraction and reduction statements are inserted
   before GSI, and every new scalar SSA name is associated with variable
   SCALAR_DEST.  Return the SSA name holding the final result.  */
static tree
vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
		       tree_code code, tree lhs, tree vector_rhs)
{
  tree vec_type = TREE_TYPE (vector_rhs);
  tree elt_type = TREE_TYPE (vec_type);
  tree elt_bits = TYPE_SIZE (elt_type);
  unsigned HOST_WIDE_INT total_bits = tree_to_uhwi (TYPE_SIZE (vec_type));
  unsigned HOST_WIDE_INT step = tree_to_uhwi (elt_bits);
  tree accum = lhs;
  for (unsigned HOST_WIDE_INT pos = 0; pos < total_bits; pos += step)
    {
      /* Extract the element that starts at bit position POS.  */
      tree extract = build3 (BIT_FIELD_REF, elt_type, vector_rhs,
			     elt_bits, bitsize_int (pos));
      gassign *extract_stmt = gimple_build_assign (scalar_dest, extract);
      tree elt = make_ssa_name (scalar_dest, extract_stmt);
      gimple_assign_set_lhs (extract_stmt, elt);
      gsi_insert_before (gsi, extract_stmt, GSI_SAME_STMT);

      /* Fold the element into the running result.  */
      gassign *fold_stmt = gimple_build_assign (scalar_dest, code,
						accum, elt);
      tree next = make_ssa_name (scalar_dest, fold_stmt);
      gimple_assign_set_lhs (fold_stmt, next);
      gsi_insert_before (gsi, fold_stmt, GSI_SAME_STMT);
      accum = next;
    }
  return accum;
}
/* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the
statement that sets the live-out value. REDUC_DEF_STMT is the phi
statement. CODE is the operation performed by STMT and OPS are
its scalar operands. REDUC_INDEX is the index of the operand in
OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
implements in-order reduction, or IFN_LAST if we should open-code it.
VECTYPE_IN is the type of the vector input. MASKS specifies the masks
that should be used to control the operation in a fully-masked loop.
Returns true; *VEC_STMT is set to the generated statement in the
non-SLP case. */
static bool
vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
gimple **vec_stmt, slp_tree slp_node,
gimple *reduc_def_stmt,
tree_code code, internal_fn reduc_fn,
tree ops[3], tree vectype_in,
int reduc_index, vec_loop_masks *masks)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
gimple *new_stmt = NULL;
/* Number of vector statements per scalar statement; an in-order
reduction only supports a single copy (asserted below). */
int ncopies;
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
/* Preconditions: no nested-loop reduction, a single vector copy, a
binary operation, and the reduction operand in the canonical
position for CODE (first for MINUS_EXPR, second otherwise). */
gcc_assert (!nested_in_vect_loop_p (loop, stmt));
gcc_assert (ncopies == 1);
gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== FOLD_LEFT_REDUCTION);
if (slp_node)
gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
TYPE_VECTOR_SUBPARTS (vectype_in)));
/* OP0 is the vector (non-reduction) operand of the scalar statement. */
tree op0 = ops[1 - reduc_index];
int group_size = 1;
gimple *scalar_dest_def;
auto_vec<tree> vec_oprnds0;
if (slp_node)
{
/* For SLP, gather one vector def per SLP statement; the scalar
destination comes from the last statement in the group. */
vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
scalar_dest_def = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
}
else
{
tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt);
vec_oprnds0.create (1);
vec_oprnds0.quick_push (loop_vec_def0);
scalar_dest_def = stmt;
}
tree scalar_dest = gimple_assign_lhs (scalar_dest_def);
tree scalar_type = TREE_TYPE (scalar_dest);
tree reduc_var = gimple_phi_result (reduc_def_stmt);
int vec_num = vec_oprnds0.length ();
gcc_assert (vec_num == 1 || slp_node);
tree vec_elem_type = TREE_TYPE (vectype_out);
gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
/* In a fully-masked loop, masked-out lanes are merged with this zero
vector before reducing (NOTE(review): presumably zero is a valid
identity for the codes that reach here — confirm against the
analysis phase). */
tree vector_identity = NULL_TREE;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
vector_identity = build_zero_cst (vectype_out);
/* Variable that holds the intermediate scalar results. */
tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
int i;
tree def0;
FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
{
tree mask = NULL_TREE;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
/* Handle MINUS by adding the negative. */
if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
{
tree negated = make_ssa_name (vectype_out);
new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
def0 = negated;
}
if (mask)
def0 = merge_with_identity (gsi, mask, vectype_out, def0,
vector_identity);
/* On the first iteration the input is simply the scalar phi
result, and for subsequent iterations it is the output of
the preceding operation. */
if (reduc_fn != IFN_LAST)
{
new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
/* For chained SLP reductions the output of the previous reduction
operation serves as the input of the next. For the final statement
the output cannot be a temporary - we reuse the original
scalar destination of the last statement. */
if (i != vec_num - 1)
{
gimple_set_lhs (new_stmt, scalar_dest_var);
reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
gimple_set_lhs (new_stmt, reduc_var);
}
}
else
{
/* No in-order reduction IFN: open-code a left-to-right chain of
scalar CODE operations. */
reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
reduc_var, def0);
new_stmt = SSA_NAME_DEF_STMT (reduc_var);
/* Remove the statement, so that we can use the same code paths
as for statements that we've just created. */
gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
gsi_remove (&tmp_gsi, true);
}
/* The last statement replaces the original scalar statement; the
others are inserted as freshly generated vector statements. */
if (i == vec_num - 1)
{
gimple_set_lhs (new_stmt, scalar_dest);
vect_finish_replace_stmt (scalar_dest_def, new_stmt);
}
else
vect_finish_stmt_generation (scalar_dest_def, new_stmt, gsi);
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (!slp_node)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
return true;
}
/* Function is_nonwrapping_integer_induction.

   Return true if the induction defined by phi STMT (which is part of loop
   LOOP) is known not to wrap: either because wrapping would be undefined
   behavior for its type, or because the maximum value it can reach,
   BASE + niters * STEP, provably fits in the type's precision.

   Fix vs. original: removed the unused local `lhs_max` and moved the
   remaining declarations to their first use.  */
static bool
is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  /* The evolution of the induction is BASE + i * STEP.  */
  tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
  tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
  tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));

  /* Make sure the loop is integer based.  */
  if (TREE_CODE (base) != INTEGER_CST
      || TREE_CODE (step) != INTEGER_CST)
    return false;

  /* If overflow is undefined for the type, the induction may be assumed
     not to wrap.  */
  if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
    return true;

  /* Otherwise bound the induction by the maximum statement execution
     count; without a known bound we cannot prove anything.  */
  widest_int ni;
  if (! max_stmt_executions (loop, &ni))
    return false;

  /* Compute BASE + STEP * ni in infinite precision, giving up if either
     operation overflows the widest_int computation.  */
  bool overflow = false;
  widest_int max_loop_value = wi::mul (wi::to_widest (step), ni,
				       TYPE_SIGN (lhs_type), &overflow);
  if (overflow)
    return false;

  max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
			    TYPE_SIGN (lhs_type), &overflow);
  if (overflow)
    return false;

  /* The induction does not wrap iff its maximum value fits in the
     precision of its type.  */
  return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
	  <= TYPE_PRECISION (lhs_type));
}
/* Function vectorizable_reduction.
Check if STMT performs a reduction operation that can be vectorized.
If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at GSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise.
This function also handles reduction idioms (patterns) that have been
recognized in advance during vect_pattern_recog. In this case, STMT may be
of this form:
X = pattern_expr (arg0, arg1, ..., X)
and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
sequence that had been detected and replaced by the pattern-stmt (STMT).
This function also handles reduction of condition expressions, for example:
for (int i = 0; i < N; i++)
if (a[i] < value)
last = a[i];
This is handled by vectorising the loop and creating an additional vector
containing the loop indexes for which "a[i] < value" was true. In the
function epilogue this is reduced to a single max value and then used to
index into the vector of results.
In some cases of reduction patterns, the type of the reduction variable X is
different than the type of the other arguments of STMT.
In such cases, the vectype that is used when transforming STMT into a vector
stmt is different than the vectype that is used to determine the
vectorization factor, because it consists of a different number of elements
than the actual number of elements that are being operated upon in parallel.
For example, consider an accumulation of shorts into an int accumulator.
On some targets it's possible to vectorize this pattern operating on 8
shorts at a time (hence, the vectype for purposes of determining the
vectorization factor should be V8HI); on the other hand, the vectype that
is used to create the vector form is actually V4SI (the type of the result).
Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
indicates what is the actual level of parallelism (V8HI in the example), so
that the right vectorization factor would be derived. This vectype
corresponds to the type of arguments to the reduction stmt, and should *NOT*
be used to create the vectorized stmt. The right vectype for the vectorized
stmt is obtained from the type of the result X:
get_vectype_for_scalar_type (TREE_TYPE (X))
This means that, contrary to "regular" reductions (or "regular" stmts in
general), the following equation:
STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
does *NOT* necessarily hold for reduction patterns. */
bool
vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
gimple **vec_stmt, slp_tree slp_node,
slp_instance slp_node_instance)
{
tree vec_dest;
tree scalar_dest;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
tree vectype_in = NULL_TREE;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code, orig_code;
internal_fn reduc_fn;
machine_mode vec_mode;
int op_type;
optab optab;
tree new_temp = NULL_TREE;
gimple *def_stmt;
enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
gimple *cond_reduc_def_stmt = NULL;
enum tree_code cond_reduc_op_code = ERROR_MARK;
tree scalar_type;
bool is_simple_use;
gimple *orig_stmt;
stmt_vec_info orig_stmt_info = NULL;
int i;
int ncopies;
int epilog_copies;
stmt_vec_info prev_stmt_info, prev_phi_info;
bool single_defuse_cycle = false;
gimple *new_stmt = NULL;
int j;
tree ops[3];
enum vect_def_type dts[3];
bool nested_cycle = false, found_nested_cycle_def = false;
bool double_reduc = false;
basic_block def_bb;
struct loop * def_stmt_loop, *outer_loop = NULL;
tree def_arg;
gimple *def_arg_stmt;
auto_vec<tree> vec_oprnds0;
auto_vec<tree> vec_oprnds1;
auto_vec<tree> vec_oprnds2;
auto_vec<tree> vect_defs;
auto_vec<gimple *> phis;
int vec_num;
tree def0, tem;
bool first_p = true;
tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
tree cond_reduc_val = NULL_TREE;
/* Make sure it was already recognized as a reduction computation. */
if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle)
return false;
if (nested_in_vect_loop_p (loop, stmt))
{
outer_loop = loop;
loop = loop->inner;
nested_cycle = true;
}
/* In case of reduction chain we switch to the first stmt in the chain, but
we don't update STMT_INFO, since only the last stmt is marked as reduction
and has reduction properties. */
if (GROUP_FIRST_ELEMENT (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
{
stmt = GROUP_FIRST_ELEMENT (stmt_info);
first_p = false;
}
if (gimple_code (stmt) == GIMPLE_PHI)
{
/* Analysis is fully done on the reduction stmt invocation. */
if (! vec_stmt)
{
if (slp_node)
slp_node_instance->reduc_phis = slp_node;
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
return true;
}
if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
/* Leave the scalar phi in place. Note that checking
STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
for reductions involving a single statement. */
return true;
gimple *reduc_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt)))
reduc_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt));
if (STMT_VINFO_VEC_REDUCTION_TYPE (vinfo_for_stmt (reduc_stmt))
== EXTRACT_LAST_REDUCTION)
/* Leave the scalar phi in place. */
return true;
gcc_assert (is_gimple_assign (reduc_stmt));
for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
{
tree op = gimple_op (reduc_stmt, k);
if (op == gimple_phi_result (stmt))
continue;
if (k == 1
&& gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
continue;
if (!vectype_in
|| (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
< GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
break;
}
gcc_assert (vectype_in);
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
use_operand_p use_p;
gimple *use_stmt;
if (ncopies > 1
&& (STMT_VINFO_RELEVANT (vinfo_for_stmt (reduc_stmt))
<= vect_used_only_live)
&& single_imm_use (gimple_phi_result (stmt), &use_p, &use_stmt)
&& (use_stmt == reduc_stmt
|| (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt))
== reduc_stmt)))
single_defuse_cycle = true;
/* Create the destination vector */
scalar_dest = gimple_assign_lhs (reduc_stmt);
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
if (slp_node)
/* The size vect_schedule_slp_instance computes is off for us. */
vec_num = vect_get_num_vectors
(LOOP_VINFO_VECT_FACTOR (loop_vinfo)
* SLP_TREE_SCALAR_STMTS (slp_node).length (),
vectype_in);
else
vec_num = 1;
/* Generate the reduction PHIs upfront. */
prev_phi_info = NULL;
for (j = 0; j < ncopies; j++)
{
if (j == 0 || !single_defuse_cycle)
{
for (i = 0; i < vec_num; i++)
{
/* Create the reduction-phi that defines the reduction
operand. */
gimple *new_phi = create_phi_node (vec_dest, loop->header);
set_vinfo_for_stmt (new_phi,
new_stmt_vec_info (new_phi, loop_vinfo));
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
else
{
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi;
else
STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
prev_phi_info = vinfo_for_stmt (new_phi);
}
}
}
}
return true;
}
/* 1. Is vectorizable reduction? */
/* Not supportable if the reduction variable is used in the loop, unless
it's a reduction chain. */
if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
&& !GROUP_FIRST_ELEMENT (stmt_info))
return false;
/* Reductions that are not used even in an enclosing outer-loop,
are expected to be "live" (used out of the loop). */
if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
&& !STMT_VINFO_LIVE_P (stmt_info))
return false;
/* 2. Has this been recognized as a reduction pattern?
Check if STMT represents a pattern that has been recognized
in earlier analysis stages. For stmts that represent a pattern,
the STMT_VINFO_RELATED_STMT field records the last stmt in
the original sequence that constitutes the pattern. */
orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
if (orig_stmt)
{
orig_stmt_info = vinfo_for_stmt (orig_stmt);
gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
}
/* 3. Check the operands of the operation. The first operands are defined
inside the loop body. The last operand is the reduction variable,
which is defined by the loop-header-phi. */
gcc_assert (is_gimple_assign (stmt));
/* Flatten RHS. */
switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
{
case GIMPLE_BINARY_RHS:
code = gimple_assign_rhs_code (stmt);
op_type = TREE_CODE_LENGTH (code);
gcc_assert (op_type == binary_op);
ops[0] = gimple_assign_rhs1 (stmt);
ops[1] = gimple_assign_rhs2 (stmt);
break;
case GIMPLE_TERNARY_RHS:
code = gimple_assign_rhs_code (stmt);
op_type = TREE_CODE_LENGTH (code);
gcc_assert (op_type == ternary_op);
ops[0] = gimple_assign_rhs1 (stmt);
ops[1] = gimple_assign_rhs2 (stmt);
ops[2] = gimple_assign_rhs3 (stmt);
break;
case GIMPLE_UNARY_RHS:
return false;
default:
gcc_unreachable ();
}
if (code == COND_EXPR && slp_node)
return false;
scalar_dest = gimple_assign_lhs (stmt);
scalar_type = TREE_TYPE (scalar_dest);
if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
&& !SCALAR_FLOAT_TYPE_P (scalar_type))
return false;
/* Do not try to vectorize bit-precision reductions. */
if (!type_has_mode_precision_p (scalar_type))
return false;
/* All uses but the last are expected to be defined in the loop.
The last use is the reduction variable. In case of nested cycle this
assumption is not true: we use reduc_index to record the index of the
reduction variable. */
gimple *reduc_def_stmt = NULL;
int reduc_index = -1;
for (i = 0; i < op_type; i++)
{
/* The condition of COND_EXPR is checked in vectorizable_condition(). */
if (i == 0 && code == COND_EXPR)
continue;
is_simple_use = vect_is_simple_use (ops[i], loop_vinfo,
&def_stmt, &dts[i], &tem);
dt = dts[i];
gcc_assert (is_simple_use);
if (dt == vect_reduction_def)
{
reduc_def_stmt = def_stmt;
reduc_index = i;
continue;
}
else if (tem)
{
/* To properly compute ncopies we are interested in the widest
input type in case we're looking at a widening accumulation. */
if (!vectype_in
|| (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
< GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
vectype_in = tem;
}
if (dt != vect_internal_def
&& dt != vect_external_def
&& dt != vect_constant_def
&& dt != vect_induction_def
&& !(dt == vect_nested_cycle && nested_cycle))
return false;
if (dt == vect_nested_cycle)
{
found_nested_cycle_def = true;
reduc_def_stmt = def_stmt;
reduc_index = i;
}
if (i == 1 && code == COND_EXPR)
{
/* Record how value of COND_EXPR is defined. */
if (dt == vect_constant_def)
{
cond_reduc_dt = dt;
cond_reduc_val = ops[i];
}
if (dt == vect_induction_def
&& def_stmt != NULL
&& is_nonwrapping_integer_induction (def_stmt, loop))
{
cond_reduc_dt = dt;
cond_reduc_def_stmt = def_stmt;
}
}
}
if (!vectype_in)
vectype_in = vectype_out;
/* When vectorizing a reduction chain w/o SLP the reduction PHI is not
directly used in stmt.  */
if (reduc_index == -1)
{
if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order reduction chain without SLP.\n");
return false;
}
if (orig_stmt)
reduc_def_stmt = STMT_VINFO_REDUC_DEF (orig_stmt_info);
else
reduc_def_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
}
if (! reduc_def_stmt || gimple_code (reduc_def_stmt) != GIMPLE_PHI)
return false;
if (!(reduc_index == -1
|| dts[reduc_index] == vect_reduction_def
|| dts[reduc_index] == vect_nested_cycle
|| ((dts[reduc_index] == vect_internal_def
|| dts[reduc_index] == vect_external_def
|| dts[reduc_index] == vect_constant_def
|| dts[reduc_index] == vect_induction_def)
&& nested_cycle && found_nested_cycle_def)))
{
/* For pattern recognized stmts, orig_stmt might be a reduction,
but some helper statements for the pattern might not, or
might be COND_EXPRs with reduction uses in the condition. */
gcc_assert (orig_stmt);
return false;
}
stmt_vec_info reduc_def_info = vinfo_for_stmt (reduc_def_stmt);
enum vect_reduction_type v_reduc_type
= STMT_VINFO_REDUC_TYPE (reduc_def_info);
gimple *tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
/* If we have a condition reduction, see if we can simplify it further. */
if (v_reduc_type == COND_REDUCTION)
{
/* TODO: We can't yet handle reduction chains, since we need to treat
each COND_EXPR in the chain specially, not just the last one.
E.g. for:
x_1 = PHI <x_3, ...>
x_2 = a_2 ? ... : x_1;
x_3 = a_3 ? ... : x_2;
we're interested in the last element in x_3 for which a_2 || a_3
is true, whereas the current reduction chain handling would
vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
as a reduction operation. */
if (reduc_index == -1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conditional reduction chains not supported\n");
return false;
}
/* vect_is_simple_reduction ensured that operand 2 is the
loop-carried operand. */
gcc_assert (reduc_index == 2);
/* Loop peeling modifies initial value of reduction PHI, which
makes the reduction stmt to be transformed different to the
original stmt analyzed. We need to record reduction code for
CONST_COND_REDUCTION type reduction at analyzing stage, thus
it can be used directly at transform stage. */
if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
|| STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
{
/* Also set the reduction type to CONST_COND_REDUCTION. */
gcc_assert (cond_reduc_dt == vect_constant_def);
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
}
else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
vectype_in, OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"optimizing condition reduction with"
" FOLD_EXTRACT_LAST.\n");
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
}
else if (cond_reduc_dt == vect_induction_def)
{
stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt);
tree base
= STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
gcc_assert (TREE_CODE (base) == INTEGER_CST
&& TREE_CODE (step) == INTEGER_CST);
cond_reduc_val = NULL_TREE;
/* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
above base; punt if base is the minimum value of the type for
MAX_EXPR or maximum value of the type for MIN_EXPR for now. */
if (tree_int_cst_sgn (step) == -1)
{
cond_reduc_op_code = MIN_EXPR;
if (tree_int_cst_sgn (base) == -1)
cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
else if (tree_int_cst_lt (base,
TYPE_MAX_VALUE (TREE_TYPE (base))))
cond_reduc_val
= int_const_binop (PLUS_EXPR, base, integer_one_node);
}
else
{
cond_reduc_op_code = MAX_EXPR;
if (tree_int_cst_sgn (base) == 1)
cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
base))
cond_reduc_val
= int_const_binop (MINUS_EXPR, base, integer_one_node);
}
if (cond_reduc_val)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"condition expression based on "
"integer induction.\n");
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
= INTEGER_INDUC_COND_REDUCTION;
}
}
else if (cond_reduc_dt == vect_constant_def)
{
enum vect_def_type cond_initial_dt;
gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
tree cond_initial_val
= PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
gcc_assert (cond_reduc_val != NULL_TREE);
vect_is_simple_use (cond_initial_val, loop_vinfo,
&def_stmt, &cond_initial_dt);
if (cond_initial_dt == vect_constant_def
&& types_compatible_p (TREE_TYPE (cond_initial_val),
TREE_TYPE (cond_reduc_val)))
{
tree e = fold_binary (LE_EXPR, boolean_type_node,
cond_initial_val, cond_reduc_val);
if (e && (integer_onep (e) || integer_zerop (e)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"condition expression based on "
"compile time constant.\n");
/* Record reduction code at analysis stage. */
STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
= integer_onep (e) ? MAX_EXPR : MIN_EXPR;
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
= CONST_COND_REDUCTION;
}
}
}
}
if (orig_stmt)
gcc_assert (tmp == orig_stmt
|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt);
else
/* We changed STMT to be the first stmt in reduction chain, hence we
check that in this case the first element in the chain is STMT. */
gcc_assert (stmt == tmp
|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
return false;
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
gcc_assert (ncopies >= 1);
vec_mode = TYPE_MODE (vectype_in);
poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
if (code == COND_EXPR)
{
/* Only call during the analysis stage, otherwise we'll lose
STMT_VINFO_TYPE. */
if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
ops[reduc_index], 0, NULL))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported condition in reduction\n");
return false;
}
}
else
{
/* 4. Supportable by target? */
if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
|| code == LROTATE_EXPR || code == RROTATE_EXPR)
{
/* Shifts and rotates are only supported by vectorizable_shifts,
not vectorizable_reduction. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported shift or rotation.\n");
return false;
}
/* 4.1. check support for the operation in the loop */
optab = optab_for_tree_code (code, vectype_in, optab_default);
if (!optab)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.\n");
return false;
}
if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
{
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "op not supported by target.\n");
if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
|| !vect_worthwhile_without_simd_p (loop_vinfo, code))
return false;
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "proceeding using word mode.\n");
}
/* Worthwhile without SIMD support? */
if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
&& !vect_worthwhile_without_simd_p (loop_vinfo, code))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.\n");
return false;
}
}
/* 4.2. Check support for the epilog operation.
If STMT represents a reduction pattern, then the type of the
reduction variable may be different than the type of the rest
of the arguments. For example, consider the case of accumulation
of shorts into an int accumulator; The original code:
S1: int_a = (int) short_a;
orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
was replaced with:
STMT: int_acc = widen_sum <short_a, int_acc>
This means that:
1. The tree-code that is used to create the vector operation in the
epilog code (that reduces the partial results) is not the
tree-code of STMT, but is rather the tree-code of the original
stmt from the pattern that STMT is replacing. I.e, in the example
above we want to use 'widen_sum' in the loop, but 'plus' in the
epilog.
2. The type (mode) we use to check available target support
for the vector operation to be created in the *epilog*, is
determined by the type of the reduction variable (in the example
above we'd check this: optab_handler (plus_optab, vect_int_mode])).
However the type (mode) we use to check available target support
for the vector operation to be created *inside the loop*, is
determined by the type of the other arguments to STMT (in the
example we'd check this: optab_handler (widen_sum_optab,
vect_short_mode)).
This is contrary to "regular" reductions, in which the types of all
the arguments are the same as the type of the reduction variable.
For "regular" reductions we can therefore use the same vector type
(and also the same tree-code) when generating the epilog code and
when generating the code inside the loop. */
vect_reduction_type reduction_type
= STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
if (orig_stmt
&& (reduction_type == TREE_CODE_REDUCTION
|| reduction_type == FOLD_LEFT_REDUCTION))
{
/* This is a reduction pattern: get the vectype from the type of the
reduction variable, and get the tree-code from orig_stmt. */
orig_code = gimple_assign_rhs_code (orig_stmt);
gcc_assert (vectype_out);
vec_mode = TYPE_MODE (vectype_out);
}
else
{
/* Regular reduction: use the same vectype and tree-code as used for
the vector code inside the loop can be used for the epilog code. */
orig_code = code;
if (code == MINUS_EXPR)
orig_code = PLUS_EXPR;
/* For simple condition reductions, replace with the actual expression
we want to base our reduction around. */
if (reduction_type == CONST_COND_REDUCTION)
{
orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
}
else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
orig_code = cond_reduc_op_code;
}
if (nested_cycle)
{
def_bb = gimple_bb (reduc_def_stmt);
def_stmt_loop = def_bb->loop_father;
def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
loop_preheader_edge (def_stmt_loop));
if (TREE_CODE (def_arg) == SSA_NAME
&& (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
&& gimple_code (def_arg_stmt) == GIMPLE_PHI
&& flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
&& vinfo_for_stmt (def_arg_stmt)
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
== vect_double_reduction_def)
double_reduc = true;
}
reduc_fn = IFN_LAST;
if (reduction_type == TREE_CODE_REDUCTION
|| reduction_type == FOLD_LEFT_REDUCTION
|| reduction_type == INTEGER_INDUC_COND_REDUCTION
|| reduction_type == CONST_COND_REDUCTION)
{
if (reduction_type == FOLD_LEFT_REDUCTION
? fold_left_reduction_fn (orig_code, &reduc_fn)
: reduction_fn_for_scalar_code (orig_code, &reduc_fn))
{
if (reduc_fn != IFN_LAST
&& !direct_internal_fn_supported_p (reduc_fn, vectype_out,
OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc op not supported by target.\n");
reduc_fn = IFN_LAST;
}
}
else
{
if (!nested_cycle || double_reduc)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no reduc code for scalar code.\n");
return false;
}
}
}
else if (reduction_type == COND_REDUCTION)
{
int scalar_precision
= GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
cr_index_scalar_type = make_unsigned_type (scalar_precision);
cr_index_vector_type = build_vector_type (cr_index_scalar_type,
nunits_out);
if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
OPTIMIZE_FOR_SPEED))
reduc_fn = IFN_REDUC_MAX;
}
if (reduction_type != EXTRACT_LAST_REDUCTION
&& reduc_fn == IFN_LAST
&& !nunits_out.is_constant ())
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"missing target support for reduction on"
" variable-length vectors.\n");
return false;
}
if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
&& ncopies > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in double reduction or condition "
"reduction.\n");
return false;
}
/* For SLP reductions, see if there is a neutral value we can use. */
tree neutral_op = NULL_TREE;
if (slp_node)
neutral_op
= neutral_op_for_slp_reduction (slp_node_instance->reduc_phis, code,
GROUP_FIRST_ELEMENT (stmt_info) != NULL);
if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
{
/* We can't support in-order reductions of code such as this:
for (int i = 0; i < n1; ++i)
for (int j = 0; j < n2; ++j)
l += a[j];
since GCC effectively transforms the loop when vectorizing:
for (int i = 0; i < n1 / VF; ++i)
for (int j = 0; j < n2; ++j)
for (int k = 0; k < VF; ++k)
l += a[j];
which is a reassociation of the original operation. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order double reduction not supported.\n");
return false;
}
if (reduction_type == FOLD_LEFT_REDUCTION
&& slp_node
&& !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* We cannot use in-order reductions in this case because there is
an implicit reassociation of the operations involved. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order unchained SLP reductions not supported.\n");
return false;
}
/* For double reductions, and for SLP reductions with a neutral value,
we construct a variable-length initial vector by loading a vector
full of the neutral value and then shift-and-inserting the start
values into the low-numbered elements. */
if ((double_reduc || neutral_op)
&& !nunits_out.is_constant ()
&& !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
vectype_out, OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction on variable-length vectors requires"
" target support for a vector-shift-and-insert"
" operation.\n");
return false;
}
/* Check extra constraints for variable-length unchained SLP reductions. */
if (STMT_SLP_TYPE (stmt_info)
&& !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
&& !nunits_out.is_constant ())
{
/* We checked above that we could build the initial vector when
there's a neutral element value. Check here for the case in
which each SLP statement has its own initial value and in which
that value needs to be repeated for every instance of the
statement within the initial vector. */
unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
if (!neutral_op
&& !can_duplicate_and_interleave_p (group_size, elt_mode))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported form of SLP reduction for"
" variable-length vectors: cannot build"
" initial vector.\n");
return false;
}
/* The epilogue code relies on the number of elements being a multiple
of the group size. The duplicate-and-interleave approach to setting
up the initial vector does too.  */
if (!multiple_p (nunits_out, group_size))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported form of SLP reduction for"
" variable-length vectors: the vector size"
" is not a multiple of the number of results.\n");
return false;
}
}
/* In case of widening multiplication by a constant, we update the type
of the constant to be the type of the other operand. We check that the
constant fits the type in the pattern recognition pass. */
if (code == DOT_PROD_EXPR
&& !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
{
if (TREE_CODE (ops[0]) == INTEGER_CST)
ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
else if (TREE_CODE (ops[1]) == INTEGER_CST)
ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"invalid types in dot-prod\n");
return false;
}
}
if (reduction_type == COND_REDUCTION)
{
widest_int ni;
if (! max_loop_iterations (loop, &ni))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"loop count not known, cannot create cond "
"reduction.\n");
return false;
}
/* Convert backedges to iterations. */
ni += 1;
/* The additional index will be the same type as the condition. Check
that the loop can fit into this less one (because we'll use up the
zero slot for when there are no matches). */
tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
if (wi::geu_p (ni, wi::to_widest (max_index)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"loop size is greater than data size.\n");
return false;
}
}
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e - we need to "unroll" the
vector stmt by a factor VF/nunits. For more details see documentation
in vectorizable_operation. */
/* If the reduction is used in an outer loop we need to generate
VF intermediate results, like so (e.g. for ncopies=2):
r0 = phi (init, r0)
r1 = phi (init, r1)
r0 = x0 + r0;
r1 = x1 + r1;
(i.e. we generate VF results in 2 registers).
In this case we have a separate def-use cycle for each copy, and therefore
for each copy we get the vector def for the reduction variable from the
respective phi node created for this copy.
Otherwise (the reduction is unused in the loop nest), we can combine
together intermediate results, like so (e.g. for ncopies=2):
r = phi (init, r)
r = x0 + r;
r = x1 + r;
(i.e. we generate VF/2 results in a single register).
In this case for each copy we get the vector def for the reduction variable
from the vectorized reduction operation generated in the previous iteration.
This only works when we see both the reduction PHI and its only consumer
in vectorizable_reduction and there are no intermediate stmts
participating. */
use_operand_p use_p;
gimple *use_stmt;
if (ncopies > 1
&& (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
&& single_imm_use (gimple_phi_result (reduc_def_stmt), &use_p, &use_stmt)
&& (use_stmt == stmt
|| STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == stmt))
{
single_defuse_cycle = true;
epilog_copies = 1;
}
else
epilog_copies = ncopies;
/* If the reduction stmt is one of the patterns that have lane
reduction embedded we cannot handle the case of ! single_defuse_cycle. */
if ((ncopies > 1
&& ! single_defuse_cycle)
&& (code == DOT_PROD_EXPR
|| code == WIDEN_SUM_EXPR
|| code == SAD_EXPR))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multi def-use cycle not possible for lane-reducing "
"reduction operation\n");
return false;
}
if (slp_node)
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
else
vec_num = 1;
internal_fn cond_fn = get_conditional_internal_fn (code);
vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
if (!vec_stmt) /* transformation not required. */
{
if (first_p)
vect_model_reduction_cost (stmt_info, reduc_fn, ncopies);
if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
{
if (reduction_type != FOLD_LEFT_REDUCTION
&& (cond_fn == IFN_LAST
|| !direct_internal_fn_supported_p (cond_fn, vectype_in,
OPTIMIZE_FOR_SPEED)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because no"
" conditional operation is available.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
else if (reduc_index == -1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop for chained"
" reductions.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
else
vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
vectype_in);
}
if (dump_enabled_p ()
&& reduction_type == FOLD_LEFT_REDUCTION)
dump_printf_loc (MSG_NOTE, vect_location,
"using an in-order (fold-left) reduction.\n");
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
return true;
}
/* Transform. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
/* FORNOW: Multiple types are not supported for condition. */
if (code == COND_EXPR)
gcc_assert (ncopies == 1);
bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
if (reduction_type == FOLD_LEFT_REDUCTION)
return vectorize_fold_left_reduction
(stmt, gsi, vec_stmt, slp_node, reduc_def_stmt, code,
reduc_fn, ops, vectype_in, reduc_index, masks);
if (reduction_type == EXTRACT_LAST_REDUCTION)
{
gcc_assert (!slp_node);
return vectorizable_condition (stmt, gsi, vec_stmt,
NULL, reduc_index, NULL);
}
/* Create the destination vector */
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
prev_stmt_info = NULL;
prev_phi_info = NULL;
if (!slp_node)
{
vec_oprnds0.create (1);
vec_oprnds1.create (1);
if (op_type == ternary_op)
vec_oprnds2.create (1);
}
phis.create (vec_num);
vect_defs.create (vec_num);
if (!slp_node)
vect_defs.quick_push (NULL_TREE);
if (slp_node)
phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
else
phis.quick_push (STMT_VINFO_VEC_STMT (vinfo_for_stmt (reduc_def_stmt)));
for (j = 0; j < ncopies; j++)
{
if (code == COND_EXPR)
{
gcc_assert (!slp_node);
vectorizable_condition (stmt, gsi, vec_stmt,
PHI_RESULT (phis[0]),
reduc_index, NULL);
/* Multiple types are not supported for condition. */
break;
}
/* Handle uses. */
if (j == 0)
{
if (slp_node)
{
/* Get vec defs for all the operands except the reduction index,
ensuring the ordering of the ops in the vector is kept. */
auto_vec<tree, 3> slp_ops;
auto_vec<vec<tree>, 3> vec_defs;
slp_ops.quick_push (ops[0]);
slp_ops.quick_push (ops[1]);
if (op_type == ternary_op)
slp_ops.quick_push (ops[2]);
vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
vec_oprnds0.safe_splice (vec_defs[0]);
vec_defs[0].release ();
vec_oprnds1.safe_splice (vec_defs[1]);
vec_defs[1].release ();
if (op_type == ternary_op)
{
vec_oprnds2.safe_splice (vec_defs[2]);
vec_defs[2].release ();
}
}
else
{
vec_oprnds0.quick_push
(vect_get_vec_def_for_operand (ops[0], stmt));
vec_oprnds1.quick_push
(vect_get_vec_def_for_operand (ops[1], stmt));
if (op_type == ternary_op)
vec_oprnds2.quick_push
(vect_get_vec_def_for_operand (ops[2], stmt));
}
}
else
{
if (!slp_node)
{
gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
if (single_defuse_cycle && reduc_index == 0)
vec_oprnds0[0] = gimple_get_lhs (new_stmt);
else
vec_oprnds0[0]
= vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]);
if (single_defuse_cycle && reduc_index == 1)
vec_oprnds1[0] = gimple_get_lhs (new_stmt);
else
vec_oprnds1[0]
= vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]);
if (op_type == ternary_op)
{
if (single_defuse_cycle && reduc_index == 2)
vec_oprnds2[0] = gimple_get_lhs (new_stmt);
else
vec_oprnds2[0]
= vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]);
}
}
}
FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
{
tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
if (masked_loop_p)
{
/* Make sure that the reduction accumulator is vop[0]. */
if (reduc_index == 1)
{
gcc_assert (commutative_tree_code (code));
std::swap (vop[0], vop[1]);
}
tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
vectype_in, i * ncopies + j);
gcall *call = gimple_build_call_internal (cond_fn, 3, mask,
vop[0], vop[1]);
new_temp = make_ssa_name (vec_dest, call);
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
new_stmt = call;
}
else
{
if (op_type == ternary_op)
vop[2] = vec_oprnds2[i];
new_temp = make_ssa_name (vec_dest, new_stmt);
new_stmt = gimple_build_assign (new_temp, code,
vop[0], vop[1], vop[2]);
}
vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (slp_node)
{
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
vect_defs.quick_push (new_temp);
}
else
vect_defs[0] = new_temp;
}
if (slp_node)
continue;
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
else
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
/* Finalize the reduction-phi (set its arguments) and create the
epilog reduction code. */
if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
vect_defs[0] = gimple_get_lhs (*vec_stmt);
vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt,
epilog_copies, reduc_fn, phis,
double_reduc, slp_node, slp_node_instance,
cond_reduc_val, cond_reduc_op_code,
neutral_op);
return true;
}
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */

static unsigned int
vect_min_worthwhile_factor (enum tree_code code)
{
  /* Cheap additive operations only pay off with wider vectors.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR || code == NEGATE_EXPR)
    return 4;

  /* Bitwise operations are even cheaper per element.  */
  if (code == BIT_AND_EXPR
      || code == BIT_IOR_EXPR
      || code == BIT_XOR_EXPR
      || code == BIT_NOT_EXPR)
    return 2;

  /* Anything else is never worthwhile without real SIMD support.  */
  return INT_MAX;
}
/* Return true if VINFO indicates we are doing loop vectorization and if
   it is worth decomposing CODE operations into scalar operations for
   that loop's vectorization factor.  */

bool
vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
{
  /* Only loop vectorization qualifies; BB vectorization does not.  */
  loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
  if (!loop_vinfo)
    return false;

  /* The vectorization factor must be a compile-time constant.  */
  unsigned HOST_WIDE_INT vf;
  if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    return false;

  return vf >= vect_min_worthwhile_factor (code);
}
/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   When VEC_STMT is NULL only the analysis is performed (and the cost
   recorded); the transformation happens on a second call with VEC_STMT
   non-NULL.  */

bool
vectorizable_induction (gimple *phi,
			gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			gimple **vec_stmt, slp_tree slp_node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned ncopies;
  bool nested_in_vect_loop = false;
  struct loop *iv_loop;
  tree vec_def;
  edge pe = loop_preheader_edge (loop);
  basic_block new_bb;
  tree new_vec, vec_init, vec_step, t;
  tree new_name;
  gimple *new_stmt;
  gphi *induction_phi;
  tree induc_def, vec_dest;
  tree init_expr, step_expr;
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned i;
  tree expr;
  gimple_seq stmts;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple *exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (phi);

  /* Only PHI nodes can represent an induction.  */
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* Make sure it was recognized as induction computation.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* With SLP the number of copies is always 1; otherwise it depends on
     the ratio of VF to the number of vector lanes.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);
  gcc_assert (ncopies >= 1);

  /* FORNOW. These restrictions should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi))
    {
      /* NOTE: these locals deliberately shadow the function-scope ones;
	 they are only used for the nested-loop legality checks below.  */
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple *exit_phi;
      edge latch_e;
      tree loop_arg;

      if (ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types in nested loop.\n");
	  return false;
	}

      /* FORNOW: outer loop induction with SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
	return false;

      /* Look for a use of the induction's latch value outside the inner
	 loop, i.e. a loop-closed exit PHI.  */
      exit_phi = NULL;
      latch_e = loop_latch_edge (loop->inner);
      loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;
	  if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
	  /* The exit PHI must be relevant inside the outer loop and not
	     merely live after it, otherwise we cannot vectorize.  */
	  if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
		&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "inner-loop induction only used outside "
				 "of the outer vectorized loop.\n");
	      return false;
	    }
	}

      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);

  if (slp_node && !nunits.is_constant ())
    {
      /* The current SLP code creates the initial value element-by-element.  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "SLP induction not supported for variable-length"
			 " vectors.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_induction ===\n");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /* Transform.  */

  /* Compute a vector variable, initialized with the first VF values of
     the induction variable.  E.g., for an iv with IV_PHI='X' and
     evolution S, for a vector of 4 units, we want to compute:
     [X, X + S, X + 2*S, X + 3*S].  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);

  step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
  gcc_assert (step_expr != NULL_TREE);

  pe = loop_preheader_edge (iv_loop);
  init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
				     loop_preheader_edge (iv_loop));

  stmts = NULL;
  if (!nested_in_vect_loop)
    {
      /* Convert the initial value to the desired type.  */
      tree new_type = TREE_TYPE (vectype);
      init_expr = gimple_convert (&stmts, new_type, init_expr);

      /* If we are using the loop mask to "peel" for alignment then we need
	 to adjust the start value here.  */
      tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
      if (skip_niters != NULL_TREE)
	{
	  if (FLOAT_TYPE_P (vectype))
	    skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
					skip_niters);
	  else
	    skip_niters = gimple_convert (&stmts, new_type, skip_niters);
	  /* Back the start value up by SKIP_NITERS steps.  */
	  tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
					 skip_niters, step_expr);
	  init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
				    init_expr, skip_step);
	}
    }

  /* Convert the step to the desired type.  */
  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);

  if (stmts)
    {
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      /* The conversions must not require edge splitting.  */
      gcc_assert (!new_bb);
    }

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  /* For SLP induction we have to generate several IVs as for example
     with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
     [i + 2*S, i + 3*S, i + 3*S, i + 3*S].  The step is the same uniform
     [VF*S, VF*S, VF*S, VF*S] for all.  */
  if (slp_node)
    {
      /* Enforced above.  */
      unsigned int const_nunits = nunits.to_constant ();

      /* Generate [VF*S, VF*S, ... ].  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, vf);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (! CONSTANT_CLASS_P (new_name))
	new_name = vect_init_vector (phi, new_name,
				     TREE_TYPE (step_expr), NULL);
      new_vec = build_vector_from_val (vectype, new_name);
      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);

      /* Now generate the IVs.  */
      unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      unsigned elts = const_nunits * nvects;
      /* Only the first NIVS vectors need distinct start values; later
	 ones are derived from them by adding a multiple of the step.  */
      unsigned nivs = least_common_multiple (group_size,
					     const_nunits) / const_nunits;
      gcc_assert (elts % group_size == 0);
      tree elt = init_expr;
      unsigned ivn;
      for (ivn = 0; ivn < nivs; ++ivn)
	{
	  tree_vector_builder elts (vectype, const_nunits, 1);
	  stmts = NULL;
	  for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
	    {
	      /* Bump ELT by one step each time a new scalar iteration
		 starts within the concatenated lanes.  */
	      if (ivn*const_nunits + eltn >= group_size
		  && (ivn * const_nunits + eltn) % group_size == 0)
		elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
				    elt, step_expr);
	      elts.quick_push (elt);
	    }
	  vec_init = gimple_build_vector (&stmts, &elts);
	  if (stmts)
	    {
	      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	      gcc_assert (!new_bb);
	    }

	  /* Create the induction-phi that defines the induction-operand.  */
	  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
	  induction_phi = create_phi_node (vec_dest, iv_loop->header);
	  set_vinfo_for_stmt (induction_phi,
			      new_stmt_vec_info (induction_phi, loop_vinfo));
	  induc_def = PHI_RESULT (induction_phi);

	  /* Create the iv update inside the loop  */
	  vec_def = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));

	  /* Set the arguments of the phi node:  */
	  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
	  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
		       UNKNOWN_LOCATION);

	  SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);
	}

      /* Re-use IVs when we can.  */
      if (ivn < nvects)
	{
	  unsigned vfp
	    = least_common_multiple (group_size, const_nunits) / group_size;
	  /* Generate [VF'*S, VF'*S, ... ].  */
	  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	    {
	      expr = build_int_cst (integer_type_node, vfp);
	      expr = fold_convert (TREE_TYPE (step_expr), expr);
	    }
	  else
	    expr = build_int_cst (TREE_TYPE (step_expr), vfp);
	  new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
				  expr, step_expr);
	  if (! CONSTANT_CLASS_P (new_name))
	    new_name = vect_init_vector (phi, new_name,
					 TREE_TYPE (step_expr), NULL);
	  new_vec = build_vector_from_val (vectype, new_name);
	  vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
	  /* Each remaining vector is the corresponding earlier IV plus
	     the adjusted step.  */
	  for (; ivn < nvects; ++ivn)
	    {
	      gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
	      tree def;
	      if (gimple_code (iv) == GIMPLE_PHI)
		def = gimple_phi_result (iv);
	      else
		def = gimple_assign_lhs (iv);
	      new_stmt = gimple_build_assign (make_ssa_name (vectype),
					      PLUS_EXPR,
					      def, vec_step);
	      if (gimple_code (iv) == GIMPLE_PHI)
		gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	      else
		{
		  gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
		  gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
		}
	      set_vinfo_for_stmt (new_stmt,
				  new_stmt_vec_info (new_stmt, loop_vinfo));
	      SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	}

      return true;
    }

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized. init_expr had already
	 been created during vectorization of previous stmts. We obtain it
	 from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      vec_init = vect_get_vec_def_for_operand (init_expr, phi);
      /* If the initial value is not of proper type, convert it.  */
      if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
	{
	  new_stmt
	    = gimple_build_assign (vect_get_new_ssa_name (vectype,
							  vect_simple_var,
							  "vec_iv_"),
				   VIEW_CONVERT_EXPR,
				   build1 (VIEW_CONVERT_EXPR, vectype,
					   vec_init));
	  vec_init = gimple_assign_lhs (new_stmt);
	  new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
						 new_stmt);
	  gcc_assert (!new_bb);
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	}
    }
  else
    {
      /* iv_loop is the loop to be vectorized. Create:
	 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      stmts = NULL;
      new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);

      unsigned HOST_WIDE_INT const_nunits;
      if (nunits.is_constant (&const_nunits))
	{
	  tree_vector_builder elts (vectype, const_nunits, 1);
	  elts.quick_push (new_name);
	  for (i = 1; i < const_nunits; i++)
	    {
	      /* Create: new_name_i = new_name + step_expr  */
	      new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
				       new_name, step_expr);
	      elts.quick_push (new_name);
	    }
	  /* Create a vector from [new_name_0, new_name_1, ...,
	     new_name_nunits-1]  */
	  vec_init = gimple_build_vector (&stmts, &elts);
	}
      else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
	/* Build the initial value directly from a VEC_SERIES_EXPR.  */
	vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
				 new_name, step_expr);
      else
	{
	  /* Build:
	     [base, base, base, ...]
	     + (vectype) [0, 1, 2, ...] * [step, step, step, ...].  */
	  gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
	  gcc_assert (flag_associative_math);
	  tree index = build_index_vector (vectype, 0, 1);
	  tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
							new_name);
	  tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
							step_expr);
	  vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
	  vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
				   vec_init, step_vec);
	  vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
				   vec_init, base_vec);
	}

      if (stmts)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}
    }

  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized. Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized. Generate:
	 vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      gimple_seq seq = NULL;
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, vf);
	  expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
			       expr, step_expr);
      if (seq)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
	      || TREE_CODE (new_name) == SSA_NAME);
  new_vec = build_vector_from_val (vectype, t);
  vec_step = vect_init_vector (phi, new_vec, vectype, NULL);

  /* Create the following def-use cycle:
     loop prolog:
	 vec_init = ...
	 vec_step = ...
     loop:
	 vec_iv = PHI <vec_init, vec_loop>
	 ...
	 STMT
	 ...
	 vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
		      new_stmt_vec_info (induction_phi, loop_vinfo));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  vec_def = make_ssa_name (vec_dest);
  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
	       UNKNOWN_LOCATION);

  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi;

  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */
  if (ncopies > 1)
    {
      gimple_seq seq = NULL;
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, nunits);
	  expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
			       expr, step_expr);
      if (seq)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
		  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (vectype, t);
      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
	{
	  /* vec_i = vec_prev + vec_step  */
	  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
					  vec_def, vec_step);
	  vec_def = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, vec_def);

	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	  /* Chain the copies via STMT_VINFO_RELATED_STMT.  */
	  STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
	  prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
	}
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
	 the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;
	  if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
	  /* FORNOW. Currently not supporting the case that an inner-loop induction
	     is not used in the outer-loop (i.e. only outside the outer-loop).  */
	  gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
		      && !STMT_VINFO_LIVE_P (stmt_vinfo));

	  STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "vector of inductions after inner-loop:");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
	    }
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
			SSA_NAME_DEF_STMT (vec_def), 0);
    }

  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.

   When VEC_STMT is NULL only the analysis is performed; the transform
   (extracting the last lane and rewriting out-of-loop uses) happens on
   a second call with VEC_STMT non-NULL.  */

bool
vectorizable_live_operation (gimple *stmt,
			     gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			     slp_tree slp_node, int slp_index,
			     gimple **vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;
  tree lhs, lhs_type, bitsize, vec_bitsize;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  gimple *use_stmt;
  auto_vec<tree> vec_oprnds;
  int vec_entry = 0;
  poly_uint64 vec_index = 0;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  /* Live reductions are handled by the reduction epilogue code, not here.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  /* FORNOW. CHECKME. */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  /* If STMT is not relevant and it is a simple assignment and its inputs are
     invariant then it can remain in place, unvectorized. The original last
     scalar value that it computes will be used. */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "statement is simple and uses invariant. Leaving in "
			 "place.\n");
      return true;
    }

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  if (slp_node)
    {
      gcc_assert (slp_index >= 0);

      int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

      /* Get the last occurrence of the scalar index from the concatenation of
	 all the slp vectors. Calculate which slp vector it is and the index
	 within. */
      poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;

      /* Calculate which vector contains the result, and which lane of
	 that vector we need. */
      if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Cannot determine which vector holds the"
			     " final result.\n");
	  return false;
	}
    }

  if (!vec_stmt)
    {
      /* No transformation required.  */
      if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
	{
	  /* A fully-masked loop needs EXTRACT_LAST, a single copy and no
	     SLP; otherwise fall back to an unmasked loop.  */
	  if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
					       OPTIMIZE_FOR_SPEED))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because "
				 "the target doesn't support extract last "
				 "reduction.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else if (slp_node)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because an "
				 "SLP statement is live after the loop.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else if (ncopies > 1)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because"
				 " ncopies is greater than 1.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else
	    {
	      gcc_assert (ncopies == 1 && !slp_node);
	      vect_record_loop_mask (loop_vinfo,
				     &LOOP_VINFO_MASKS (loop_vinfo),
				     1, vectype);
	    }
	}
      return true;
    }

  /* If stmt has a related stmt, then use that for getting the lhs. */
  if (is_pattern_stmt_p (stmt_info))
    stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
	: gimple_get_lhs (stmt);
  lhs_type = TREE_TYPE (lhs);

  /* For boolean vectors the lane size is the element precision, not the
     element's TYPE_SIZE.  */
  bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
	     ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
	     : TYPE_SIZE (TREE_TYPE (vectype)));
  vec_bitsize = TYPE_SIZE (vectype);

  /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
  tree vec_lhs, bitstart;
  if (slp_node)
    {
      gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* Get the correct slp vectorized stmt. */
      gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry];
      if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
	vec_lhs = gimple_phi_result (phi);
      else
	vec_lhs = gimple_get_lhs (vec_stmt);

      /* Get entry to use. */
      bitstart = bitsize_int (vec_index);
      bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
    }
  else
    {
      enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
      vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt);
      gcc_checking_assert (ncopies == 1
			   || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* For multiple copies, get the last copy. */
      for (int i = 1; i < ncopies; ++i)
	vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
						  vec_lhs);

      /* Get the last lane in the vector. */
      bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
    }

  gimple_seq stmts = NULL;
  tree new_tree;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* Emit:
	 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
	 where VEC_LHS is the vectorized live-out result and MASK is
	 the loop mask for the final iteration. */
      gcc_assert (ncopies == 1 && !slp_node);
      tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
      tree scalar_res = make_ssa_name (scalar_type);
      tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
				      1, vectype, 0);
      gcall *new_stmt = gimple_build_call_internal (IFN_EXTRACT_LAST,
						    2, mask, vec_lhs);
      gimple_call_set_lhs (new_stmt, scalar_res);
      gimple_seq_add_stmt (&stmts, new_stmt);

      /* Convert the extracted vector element to the required scalar type. */
      new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
    }
  else
    {
      tree bftype = TREE_TYPE (vectype);
      if (VECTOR_BOOLEAN_TYPE_P (vectype))
	bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
      new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
      new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
				       &stmts, true, NULL_TREE);
    }

  /* The extraction happens on the unique loop exit edge.  */
  if (stmts)
    gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);

  /* Replace use of lhs with newly computed result. If the use stmt is a
     single arg PHI, just replace all uses of PHI result. It's necessary
     because lcssa PHI defining lhs may be before newly inserted stmt. */
  use_operand_p use_p;
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
    if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
	&& !is_gimple_debug (use_stmt))
      {
	if (gimple_code (use_stmt) == GIMPLE_PHI
	    && gimple_phi_num_args (use_stmt) == 1)
	  {
	    replace_uses_by (gimple_phi_result (use_stmt), new_tree);
	  }
	else
	  {
	    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	      SET_USE (use_p, new_tree);
	  }
	update_stmt (use_stmt);
      }

  return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT. */
static void
vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
{
ssa_op_iter op_iter;
imm_use_iterator imm_iter;
def_operand_p def_p;
gimple *ustmt;
FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
{
FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
{
basic_block bb;
if (!is_gimple_debug (ustmt))
continue;
bb = gimple_bb (ustmt);
if (!flow_bb_inside_loop_p (loop, bb))
{
if (gimple_debug_bind_p (ustmt))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"killing debug use\n");
gimple_debug_bind_reset_value (ustmt);
update_stmt (ustmt);
}
else
gcc_unreachable ();
}
}
}
}
/* Given loop represented by LOOP_VINFO, return true if computation of
   LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
   otherwise.  */

static bool
loop_niters_no_overflow (loop_vec_info loop_vinfo)
{
  /* When the iteration count is a compile-time constant we can compare
     NITERSM1 and NITERS directly: NITERSM1 < NITERS proves NITERSM1 + 1
     did not wrap.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      tree niters = LOOP_VINFO_NITERS (loop_vinfo);
      tree nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
      gcc_assert (TREE_CODE (niters) == INTEGER_CST);
      gcc_assert (TREE_CODE (nitersm1) == INTEGER_CST);
      if (wi::to_widest (nitersm1) < wi::to_widest (niters))
	return true;
    }

  /* Otherwise fall back on the static upper bound of the loop's
     iteration count.  */
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  widest_int bound;
  if (!get_max_loop_iterations (loop, &bound))
    return false;

  /* No overflow if the bound is strictly below the maximum value
     representable in the type of NITERS.  */
  tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
  signop sgn = TYPE_SIGN (type);
  widest_int type_max = widest_int::from (wi::max_value (type), sgn);
  return bound < type_max;
}
/* Return a mask type with half the number of elements as TYPE.  */

tree
vect_halve_mask_nunits (tree type)
{
  /* The element count must be exactly divisible by two.  */
  return build_truth_vector_type (exact_div (TYPE_VECTOR_SUBPARTS (type), 2),
				  current_vector_size);
}
/* Return a mask type with twice as many elements as TYPE.  */

tree
vect_double_mask_nunits (tree type)
{
  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (type) * 2,
				  current_vector_size);
}
/* Record that a fully-masked version of LOOP_VINFO would need MASKS to
   contain a sequence of NVECTORS masks that each control a vector of type
   VECTYPE.  */

void
vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
		       unsigned int nvectors, tree vectype)
{
  gcc_assert (nvectors != 0);

  /* The number of scalars per iteration and the number of vectors are
     both compile-time constants.  */
  unsigned int nscalars_per_iter
    = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
		 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();

  /* Make sure the rgroup entry for NVECTORS exists.  */
  if (masks->length () < nvectors)
    masks->safe_grow_cleared (nvectors);
  rgroup_masks *entry = &(*masks)[nvectors - 1];

  /* Record the widest requirement seen for this rgroup.  */
  if (entry->max_nscalars_per_iter < nscalars_per_iter)
    {
      entry->max_nscalars_per_iter = nscalars_per_iter;
      entry->mask_type = build_same_sized_truth_vector_type (vectype);
    }
}
/* Given a complete set of masks MASKS, extract mask number INDEX
   for an rgroup that operates on NVECTORS vectors of type VECTYPE,
   where 0 <= INDEX < NVECTORS.  Insert any set-up statements before GSI.
   See the comment above vec_loop_masks for more details about the mask
   arrangement.  */

tree
vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
		    unsigned int nvectors, tree vectype, unsigned int index)
{
  /* rgroups are indexed by the number of vectors minus one.  */
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  tree mask_type = rgm->mask_type;

  /* Populate the rgroup's mask array, if this is the first time we've
     used it.  */
  if (rgm->masks.is_empty ())
    {
      rgm->masks.safe_grow_cleared (nvectors);
      for (unsigned int i = 0; i < nvectors; ++i)
	{
	  tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
	  /* Provide a dummy definition until the real one is available.  */
	  SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
	  rgm->masks[i] = mask;
	}
    }

  tree mask = rgm->masks[index];
  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
		TYPE_VECTOR_SUBPARTS (vectype)))
    {
      /* A loop mask for data type X can be reused for data type Y
	 if X has N times more elements than Y and if Y's elements
	 are N times bigger than X's.  In this case each sequence
	 of N elements in the loop mask will be all-zero or all-one.
	 We can then view-convert the mask so that each sequence of
	 N elements is replaced by a single element.  */
      gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
			      TYPE_VECTOR_SUBPARTS (vectype)));
      gimple_seq seq = NULL;
      mask_type = build_same_sized_truth_vector_type (vectype);
      mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
      /* Emit the conversion statements (if any) right before GSI.  */
      if (seq)
	gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
    }
  return mask;
}
/* Scale profiling counters by estimation for LOOP which is vectorized
   by factor VF.  */

static void
scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
{
  edge preheader = loop_preheader_edge (loop);
  /* Reduce loop iterations by the vectorization factor.  */
  gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
  profile_count freq_h = loop->header->count, freq_e = preheader->count ();

  if (freq_h.nonzero_p ())
    {
      profile_probability p;

      /* Avoid dropping loop body profile counter to 0 because of zero count
	 in loop's preheader.  */
      if (!(freq_e == profile_count::zero ()))
        freq_e = freq_e.force_nonzero ();
      /* Scale the body so the header executes roughly
	 (new_est_niter + 1) times per preheader entry.  */
      p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
      scale_loop_frequencies (loop, p);
    }

  /* The exit is taken once per (new_est_niter + 1) header executions.  */
  edge exit_e = single_exit (loop);
  exit_e->probability = profile_probability::always ()
		 .apply_scale (1, new_est_niter + 1);

  /* Keep the latch edge consistent with the new exit probability and
     rescale the latch block accordingly.  */
  edge exit_l = single_pred_edge (loop->latch);
  profile_probability prob = exit_l->probability;
  exit_l->probability = exit_e->probability.invert ();
  if (prob.initialized_p () && exit_l->probability.initialized_p ())
    scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
}
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - created vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.
   Returns scalar epilogue loop if any.  */

struct loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct loop *epilogue = NULL;
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  tree niters_vector = NULL_TREE;
  tree step_vector = NULL_TREE;
  tree niters_vector_mult_vf = NULL_TREE;
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int lowest_vf = constant_lower_bound (vf);
  bool grouped_store;
  bool slp_scheduled = false;
  gimple *stmt, *pattern_stmt;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool transform_pattern_stmt = false;
  bool check_profitability = false;
  unsigned int th;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the (estimated) vectorization factor number of times
     checking is pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= vect_vf_for_cost (loop_vinfo)
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Profitability threshold is %d loop iterations.\n",
			 th);
      check_profitability = true;
    }

  /* Make sure there exists a single-predecessor exit bb.  Do this before
     versioning.   */
  edge e = single_exit (loop);
  if (! single_pred_p (e->dest))
    {
      split_loop_exit_edge (e);
      if (dump_enabled_p ())
	dump_printf (MSG_NOTE, "split exit edge\n");
    }

  /* Version the loop first, if required, so the profitability check
     comes first.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      poly_uint64 versioning_threshold
	= LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
      /* Fold the profitability check into the versioning check where
	 both thresholds are comparable, so only one runtime test is
	 emitted.  */
      if (check_profitability
	  && ordered_p (poly_uint64 (th), versioning_threshold))
	{
	  versioning_threshold = ordered_max (poly_uint64 (th),
					      versioning_threshold);
	  check_profitability = false;
	}
      vect_loop_versioning (loop_vinfo, th, check_profitability,
			    versioning_threshold);
      check_profitability = false;
    }

  /* Make sure there exists a single-predecessor exit bb also on the
     scalar loop copy.  Do this after versioning but before peeling
     so CFG structure is fine for both scalar and if-converted loop
     to make slpeel_duplicate_current_defs_from_edges face matched
     loop closed PHI nodes on the exit.  */
  if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
    {
      e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
      if (! single_pred_p (e->dest))
	{
	  split_loop_exit_edge (e);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
	}
    }

  tree niters = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
  tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
  bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
  /* Peel for alignment/gaps and set up prologue/epilogue loops as
     required; may compute NITERS_VECTOR as a side effect.  */
  epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
			      &step_vector, &niters_vector_mult_vf, th,
			      check_profitability, niters_no_overflow);

  if (niters_vector == NULL_TREE)
    {
      /* With a constant iteration count and a constant VF the vector
	 trip count can be computed at compile time.  */
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	  && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
	  && known_eq (lowest_vf, vf))
	{
	  niters_vector
	    = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
			     LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
	  step_vector = build_one_cst (TREE_TYPE (niters));
	}
      else
	vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
				     &step_vector, niters_no_overflow);
    }

  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
      && vect_use_loop_mask_for_alignment_p (loop_vinfo))
    /* This will deal with any possible peeling.  */
    vect_prepare_for_masked_peels (loop_vinfo);

  /* FORNOW: the vectorizer supports only loops which body consist
     of one basic block (header + empty latch). When the vectorizer will
     support more involved loop forms, the order by which the BBs are
     traversed need to be reconsidered.  */

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;

      /* First transform relevant induction/reduction/nested-cycle PHIs.  */
      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
        {
	  gphi *phi = si.phi ();
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }
	  stmt_info = vinfo_for_stmt (phi);
	  if (!stmt_info)
	    continue;

	  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, phi);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    continue;

	  if (STMT_VINFO_VECTYPE (stmt_info)
	      && (maybe_ne
		  (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
	      && dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

	  if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
	      && ! PURE_SLP_STMT (stmt_info))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
	      vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
	    }
	}

      /* Then walk the straight-line statements; pattern statements and
	 their def sequences are interleaved into the traversal.  */
      pattern_stmt = NULL;
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si) || transform_pattern_stmt;)
	{
	  bool is_store;

          if (transform_pattern_stmt)
	    stmt = pattern_stmt;
          else
	    {
	      stmt = gsi_stmt (si);
	      /* During vectorization remove existing clobber stmts.  */
	      if (gimple_clobber_p (stmt))
		{
		  unlink_stmt_vdef (stmt);
		  gsi_remove (&si, true);
		  release_defs (stmt);
		  continue;
		}
	    }

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "------>vectorizing statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  stmt_info = vinfo_for_stmt (stmt);

	  /* vector stmts created in the outer-loop during vectorization of
	     stmts in an inner-loop may not have a stmt_info, and do not
	     need to be vectorized.  */
	  if (!stmt_info)
	    {
	      gsi_next (&si);
	      continue;
	    }

	  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, stmt);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    {
	      /* An irrelevant statement may still carry a relevant
		 pattern statement; switch over to that if so.  */
	      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
		{
		  stmt = pattern_stmt;
		  stmt_info = vinfo_for_stmt (stmt);
		}
	      else
		{
		  gsi_next (&si);
		  continue;
		}
	    }
	  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	    transform_pattern_stmt = true;

	  /* If pattern statement has def stmts, vectorize them too.  */
	  if (is_pattern_stmt_p (stmt_info))
	    {
	      if (pattern_def_seq == NULL)
		{
		  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
		  pattern_def_si = gsi_start (pattern_def_seq);
		}
	      else if (!gsi_end_p (pattern_def_si))
		gsi_next (&pattern_def_si);
	      if (pattern_def_seq != NULL)
		{
		  gimple *pattern_def_stmt = NULL;
		  stmt_vec_info pattern_def_stmt_info = NULL;

		  /* Skip def stmts that are neither relevant nor live.  */
		  while (!gsi_end_p (pattern_def_si))
		    {
		      pattern_def_stmt = gsi_stmt (pattern_def_si);
		      pattern_def_stmt_info
			= vinfo_for_stmt (pattern_def_stmt);
		      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
			  || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
			break;
		      gsi_next (&pattern_def_si);
		    }

		  if (!gsi_end_p (pattern_def_si))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "==> vectorizing pattern def "
					   "stmt: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
					    pattern_def_stmt, 0);
			}

		      stmt = pattern_def_stmt;
		      stmt_info = pattern_def_stmt_info;
		    }
		  else
		    {
		      pattern_def_si = gsi_none ();
		      transform_pattern_stmt = false;
		    }
		}
	      else
		transform_pattern_stmt = false;
	    }

	  if (STMT_VINFO_VECTYPE (stmt_info))
	    {
	      poly_uint64 nunits
		= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
	      if (!STMT_SLP_TYPE (stmt_info)
		  && maybe_ne (nunits, vf)
		  && dump_enabled_p ())
		  /* For SLP VF is set according to unrolling factor, and not
		     to vector size, hence for SLP this print is not valid.  */
		dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
	    }

	  /* SLP.  Schedule all the SLP instances when the first SLP stmt is
	     reached.  */
	  if (STMT_SLP_TYPE (stmt_info))
	    {
	      if (!slp_scheduled)
		{
		  slp_scheduled = true;

		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "=== scheduling SLP instances ===\n");

		  vect_schedule_slp (loop_vinfo);
		}

	      /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
	      if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
		{
		  if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
		    {
		      pattern_def_seq = NULL;
		      gsi_next (&si);
		    }
		  continue;
		}
	    }

	  /* -------- vectorize statement ------------ */
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

	  grouped_store = false;
	  is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
          if (is_store)
            {
	      if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
		{
		  /* Interleaving. If IS_STORE is TRUE, the vectorization of the
		     interleaving chain was completed - free all the stores in
		     the chain.  */
		  gsi_next (&si);
		  vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
		}
	      else
		{
		  /* Free the attached stmt_vec_info and remove the stmt.  */
		  gimple *store = gsi_stmt (si);
		  free_stmt_vec_info (store);
		  unlink_stmt_vdef (store);
		  gsi_remove (&si, true);
		  release_defs (store);
		}

	      /* Stores can only appear at the end of pattern statements.  */
	      gcc_assert (!transform_pattern_stmt);
	      pattern_def_seq = NULL;
	    }
	  else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
	    {
	      pattern_def_seq = NULL;
	      gsi_next (&si);
	    }
	}		        /* stmts in BB */

      /* Stub out scalar statements that must not survive vectorization.
	 Doing this here helps with grouped statements, or statements that
	 are involved in patterns.  */
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
	  if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
	    {
	      tree lhs = gimple_get_lhs (call);
	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  tree zero = build_zero_cst (TREE_TYPE (lhs));
		  gimple *new_stmt = gimple_build_assign (lhs, zero);
		  gsi_replace (&gsi, new_stmt, true);
		}
	    }
	}
    }				/* BBs in loop */

  /* The vectorization factor is always > 1, so if we use an IV increment of 1.
     a zero NITERS becomes a nonzero NITERS_VECTOR.  */
  if (integer_onep (step_vector))
    niters_no_overflow = true;
  vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
			   niters_vector_mult_vf, !niters_no_overflow);

  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
  scale_profile_for_vect_loop (loop, assumed_vf);

  /* True if the final iteration might not handle a full vector's
     worth of scalar iterations.  */
  bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
  /* The minimum number of iterations performed by the epilogue.  This
     is 1 when peeling for gaps because we always need a final scalar
     iteration.  */
  int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
  /* +1 to convert latch counts to loop iteration counts,
     -min_epilogue_iters to remove iterations that cannot be performed
     by the vector code.  */
  int bias_for_lowest = 1 - min_epilogue_iters;
  int bias_for_assumed = bias_for_lowest;
  int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* When the amount of peeling is known at compile time, the first
	 iteration will have exactly alignment_npeels active elements.
	 In the worst case it will have at least one.  */
      int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
      bias_for_lowest += lowest_vf - min_first_active;
      bias_for_assumed += assumed_vf - min_first_active;
    }
  /* In these calculations the "- 1" converts loop iteration counts
     back to latch counts.  */
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
			  lowest_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
			   lowest_vf) - 1);
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
			  + bias_for_lowest, lowest_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
			   + bias_for_lowest, lowest_vf) - 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
			  assumed_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
			   assumed_vf) - 1);

  if (dump_enabled_p ())
    {
      if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "LOOP VECTORIZED\n");
	  if (loop->inner)
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "OUTER LOOP VECTORIZED\n");
	  dump_printf (MSG_NOTE, "\n");
	}
      else
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "LOOP EPILOGUE VECTORIZED (VS=");
	  dump_dec (MSG_NOTE, current_vector_size);
	  dump_printf (MSG_NOTE, ")\n");
	}
    }

  /* Free SLP instances here because otherwise stmt reference counting
     won't work.  */
  slp_instance instance;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    vect_free_slp_instance (instance);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Clear-up safelen field since its value is invalid after vectorization
     since vectorized loop can have loop-carried dependencies.  */
  loop->safelen = 0;

  /* Don't vectorize epilogue for epilogue.  */
  if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    epilogue = NULL;

  if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
    epilogue = NULL;

  if (epilogue)
    {
      /* Decide whether a smaller vector size could profitably be used
	 for the epilogue; if none fits, drop the epilogue.  */
      auto_vector_sizes vector_sizes;
      targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
      unsigned int next_size = 0;

      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	  && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
	  && known_eq (vf, lowest_vf))
	{
	  unsigned int eiters
	    = (LOOP_VINFO_INT_NITERS (loop_vinfo)
	       - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
	  eiters = eiters % lowest_vf;
	  epilogue->nb_iterations_upper_bound = eiters - 1;

	  unsigned int ratio;
	  while (next_size < vector_sizes.length ()
		 && !(constant_multiple_p (current_vector_size,
					   vector_sizes[next_size], &ratio)
		      && eiters >= lowest_vf / ratio))
	    next_size += 1;
	}
      else
	while (next_size < vector_sizes.length ()
	       && maybe_lt (current_vector_size, vector_sizes[next_size]))
	  next_size += 1;

      if (next_size == vector_sizes.length ())
	epilogue = NULL;
    }

  if (epilogue)
    {
      epilogue->force_vectorize = loop->force_vectorize;
      epilogue->safelen = loop->safelen;
      epilogue->dont_vectorize = false;

      /* We may need to if-convert epilogue to vectorize it.  */
      if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
	tree_if_conversion (epilogue);
    }

  return epilogue;
}
/* The code below is trying to perform simple optimization - revert
   if-conversion for masked stores, i.e. if the mask of a store is zero
   do not perform it and all stored value producers also if possible.
   For example,
     for (i=0; i<n; i++)
       if (c[i])
	{
	  p1[i] += 1;
	  p2[i] = p3[i] +2;
	}
   this transformation will produce the following semi-hammock:

   if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
     {
       vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
       vect__12.22_172 = vect__11.19_170 + vect_cst__171;
       MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
       vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
       vect__19.28_184 = vect__18.25_182 + vect_cst__183;
       MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
     }
*/

void
optimize_mask_stores (struct loop *loop)
{
  basic_block *bbs = get_loop_body (loop);
  unsigned nbbs = loop->num_nodes;
  unsigned i;
  basic_block bb;
  struct loop *bb_loop;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  auto_vec<gimple *> worklist;

  vect_location = find_loop_location (loop);
  /* Pick up all masked stores in loop if any.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	    worklist.safe_push (stmt);
	}
    }

  free (bbs);
  if (worklist.is_empty ())
    return;

  /* Loop has masked stores.  */
  while (!worklist.is_empty ())
    {
      gimple *last, *last_store;
      edge e, efalse;
      tree mask;
      basic_block store_bb, join_bb;
      gimple_stmt_iterator gsi_to;
      tree vdef, new_vdef;
      gphi *phi;
      tree vectype;
      tree zero;

      last = worklist.pop ();
      mask = gimple_call_arg (last, 2);
      bb = gimple_bb (last);
      /* Create then_bb and if-then structure in CFG, then_bb belongs to
	 the same loop as if_bb.  It could be different to LOOP when two
	 level loop-nest is vectorized and mask_store belongs to the inner
	 one.  */
      e = split_block (bb, last);
      bb_loop = bb->loop_father;
      gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
      join_bb = e->dest;
      store_bb = create_empty_bb (bb);
      add_bb_to_loop (store_bb, bb_loop);
      e->flags = EDGE_TRUE_VALUE;
      efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
      /* Put STORE_BB to likely part.  */
      efalse->probability = profile_probability::unlikely ();
      store_bb->count = efalse->count ();
      make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
      if (dom_info_available_p (CDI_DOMINATORS))
	set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Create new block %d to sink mask stores.",
			 store_bb->index);
      /* Create vector comparison with boolean result.  */
      vectype = TREE_TYPE (mask);
      zero = build_zero_cst (vectype);
      stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (bb);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      /* Create new PHI node for vdef of the last masked store:
	 .MEM_2 = VDEF <.MEM_1>
	 will be converted to
	 .MEM.3 = VDEF <.MEM_1>
	 and new PHI node will be created in join bb
	 .MEM_2 = PHI <.MEM_1, .MEM_3>
      */
      vdef = gimple_vdef (last);
      new_vdef = make_ssa_name (gimple_vop (cfun), last);
      gimple_set_vdef (last, new_vdef);
      phi = create_phi_node (vdef, join_bb);
      add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);

      /* Put all masked stores with the same mask to STORE_BB if possible.  */
      while (true)
	{
	  gimple_stmt_iterator gsi_from;
	  gimple *stmt1 = NULL;

	  /* Move masked store to STORE_BB.  */
	  last_store = last;
	  gsi = gsi_for_stmt (last);
	  gsi_from = gsi;
	  /* Shift GSI to the previous stmt for further traversal.  */
	  gsi_prev (&gsi);
	  gsi_to = gsi_start_bb (store_bb);
	  gsi_move_before (&gsi_from, &gsi_to);
	  /* Setup GSI_TO to the non-empty block start.  */
	  gsi_to = gsi_start_bb (store_bb);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "Move stmt to created bb\n");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
	    }
	  /* Move all stored value producers if possible.  */
	  while (!gsi_end_p (gsi))
	    {
	      tree lhs;
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;
	      bool res;

	      /* Skip debug statements.  */
	      if (is_gimple_debug (gsi_stmt (gsi)))
		{
		  gsi_prev (&gsi);
		  continue;
		}
	      stmt1 = gsi_stmt (gsi);
	      /* Do not consider statements writing to memory or having
		 volatile operand.  */
	      if (gimple_vdef (stmt1)
		  || gimple_has_volatile_ops (stmt1))
		break;
	      gsi_from = gsi;
	      gsi_prev (&gsi);
	      lhs = gimple_get_lhs (stmt1);
	      if (!lhs)
		break;

	      /* LHS of vectorized stmt must be SSA_NAME.  */
	      if (TREE_CODE (lhs) != SSA_NAME)
		break;

	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  /* Remove dead scalar statement.  */
		  if (has_zero_uses (lhs))
		    {
		      gsi_remove (&gsi_from, true);
		      continue;
		    }
		}

	      /* Check that LHS does not have uses outside of STORE_BB.  */
	      res = true;
	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
		{
		  gimple *use_stmt;
		  use_stmt = USE_STMT (use_p);
		  if (is_gimple_debug (use_stmt))
		    continue;
		  if (gimple_bb (use_stmt) != store_bb)
		    {
		      res = false;
		      break;
		    }
		}
	      if (!res)
		break;

	      /* Producers must share the VUSE of the store chain, i.e. not
		 read memory the moved stores might have clobbered.  */
	      if (gimple_vuse (stmt1)
		  && gimple_vuse (stmt1) != gimple_vuse (last_store))
		break;

	      /* Can move STMT1 to STORE_BB.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "Move stmt to created bb\n");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
		}
	      gsi_move_before (&gsi_from, &gsi_to);
	      /* Shift GSI_TO for further insertion.  */
	      gsi_prev (&gsi_to);
	    }
	  /* Put other masked stores with the same mask to STORE_BB.  */
	  if (worklist.is_empty ()
	      || gimple_call_arg (worklist.last (), 2) != mask
	      || worklist.last () != stmt1)
	    break;
	  last = worklist.pop ();
	}
      add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
    }
}
|
matrix_vector_functions_intel_mkl.c | /* high level matrix/vector functions using Intel MKL for blas */
#include "matrix_vector_functions_intel_mkl.h"
#include "mkl_scalapack.h"
/* initialize new matrix and set all entries to zero.
   Returns NULL if either allocation fails (previously this would
   crash later on a NULL dereference). Caller owns the result and
   frees it with matrix_delete(). */
mat * matrix_new(int nrows, int ncols)
{
    mat *M;
    M = (mat *)malloc(sizeof(mat));
    if(M == NULL){
        return NULL;
    }
    /* calloc zero-initializes and guards the nrows*ncols product */
    M->d = (double*)calloc((size_t)nrows*(size_t)ncols, sizeof(double));
    if(M->d == NULL){
        free(M);
        return NULL;
    }
    M->nrows = nrows;
    M->ncols = ncols;
    return M;
}
/* initialize new vector and set all entries to zero.
   Returns NULL if either allocation fails. Caller owns the result
   and frees it with vector_delete(). */
vec * vector_new(int nrows)
{
    vec *v;
    v =(vec *) malloc(sizeof(vec));
    if(v == NULL){
        return NULL;
    }
    v->d = (double*)calloc((size_t)nrows,sizeof(double));
    if(v->d == NULL){
        free(v);
        return NULL;
    }
    v->nrows = nrows;
    return v;
}
/* Free a matrix created by matrix_new. Safe to call with NULL
   (mirrors free() semantics), so error paths can clean up blindly. */
void matrix_delete(mat *M)
{
    if(M != NULL){
        free(M->d);
        free(M);
    }
}
/* Free a vector created by vector_new. Safe to call with NULL. */
void vector_delete(vec *v)
{
    if(v != NULL){
        free(v->d);
        free(v);
    }
}
// column major format
/* Set entry (row_num, col_num) of M to val. The index is computed in
   size_t so matrices with more than 2^31 entries do not overflow the
   intermediate int product. */
void matrix_set_element(mat *M, int row_num, int col_num, double val){
    M->d[(size_t)col_num*(size_t)(M->nrows) + (size_t)row_num] = val;
}
/* Return entry (row_num, col_num) of column-major M. size_t index
   arithmetic avoids int overflow for very large matrices. */
double matrix_get_element(mat *M, int row_num, int col_num){
    return M->d[(size_t)col_num*(size_t)(M->nrows) + (size_t)row_num];
}
/* Set entry row_num of v to val; no bounds checking is performed. */
void vector_set_element(vec *v, int row_num, double val){
    v->d[row_num] = val;
}
/* Return entry row_num of v; no bounds checking is performed. */
double vector_get_element(vec *v, int row_num){
    return v->d[row_num];
}
/* load matrix from binary file
 * the nonzeros are in order of double loop over rows and columns
format:
num_rows (int)
num_columns (int)
nnz (double)
...
nnz (double)
   Returns NULL on open failure, short read, or a nonsensical header
   (previously these cases dereferenced a NULL FILE* or silently
   produced garbage data). */
mat * matrix_load_from_binary_file(char *fname){
    int i, j, num_rows, num_columns;
    double nnz_val;
    size_t one = 1;
    FILE *fp;
    mat *M;
    /* "rb": the payload is binary; text mode would mangle it on Windows */
    fp = fopen(fname,"rb");
    if(fp == NULL){
        fprintf(stderr, "matrix_load_from_binary_file: cannot open %s\n", fname);
        return NULL;
    }
    /* read m and n, validating both the reads and the values */
    if(fread(&num_rows,sizeof(int),one,fp) != one
       || fread(&num_columns,sizeof(int),one,fp) != one
       || num_rows <= 0 || num_columns <= 0){
        fprintf(stderr, "matrix_load_from_binary_file: bad header in %s\n", fname);
        fclose(fp);
        return NULL;
    }
    printf("initializing M of size %d by %d\n", num_rows, num_columns);
    M = matrix_new(num_rows,num_columns);
    printf("done..\n");
    // read and set elements
    for(i=0; i<num_rows; i++){
        for(j=0; j<num_columns; j++){
            if(fread(&nnz_val,sizeof(double),one,fp) != one){
                fprintf(stderr, "matrix_load_from_binary_file: truncated data in %s\n", fname);
                matrix_delete(M);
                fclose(fp);
                return NULL;
            }
            matrix_set_element(M,i,j,nnz_val);
        }
    }
    fclose(fp);
    return M;
}
/* write matrix to binary file
 * the nonzeros are in order of double loop over rows and columns
format:
num_rows (int)
num_columns (int)
nnz (double)
...
nnz (double)
   Errors (open/write/close failures) are reported on stderr; the
   function keeps its void interface. */
void matrix_write_to_binary_file(mat *M, char *fname){
    int i, j, num_rows, num_columns;
    double nnz_val;
    size_t one = 1;
    FILE *fp;
    num_rows = M->nrows; num_columns = M->ncols;
    /* "wb": binary payload; text mode would corrupt it on Windows */
    fp = fopen(fname,"wb");
    if(fp == NULL){
        fprintf(stderr, "matrix_write_to_binary_file: cannot open %s\n", fname);
        return;
    }
    /* write m and n */
    if(fwrite(&num_rows,sizeof(int),one,fp) != one
       || fwrite(&num_columns,sizeof(int),one,fp) != one){
        fprintf(stderr, "matrix_write_to_binary_file: write failed for %s\n", fname);
        fclose(fp);
        return;
    }
    // write the elements
    for(i=0; i<num_rows; i++){
        for(j=0; j<num_columns; j++){
            nnz_val = matrix_get_element(M,i,j);
            if(fwrite(&nnz_val,sizeof(double),one,fp) != one){
                fprintf(stderr, "matrix_write_to_binary_file: write failed for %s\n", fname);
                fclose(fp);
                return;
            }
        }
    }
    /* fclose flushes buffered data; a failure here means data loss */
    if(fclose(fp) != 0){
        fprintf(stderr, "matrix_write_to_binary_file: close failed for %s\n", fname);
    }
}
/* Print M row by row to stdout, entries separated by spaces. */
void matrix_print(mat * M){
    int row, col;
    for(row=0; row<M->nrows; row++){
        for(col=0; col<M->ncols; col++){
            printf("%f ", matrix_get_element(M, row, col));
        }
        printf("\n");
    }
}
/* Print v to stdout, one entry per line. */
void vector_print(vec * v){
    int row;
    for(row=0; row<v->nrows; row++){
        printf("%f\n", vector_get_element(v, row));
    }
}
/* v(:) = data — copy v->nrows doubles from data into v in parallel. */
void vector_set_data(vec *v, double *data){
    int i;
    #pragma omp parallel for shared(v,data)
    for(i=0; i<(v->nrows); i++){
        v->d[i] = data[i];
    }
}
/* scale vector by a constant: v = scalar*v */
void vector_scale(vec *v, double scalar){
    int i;
    #pragma omp parallel for shared(v)
    for(i=0; i<(v->nrows); i++){
        v->d[i] *= scalar;
    }
}
/* scale matrix by a constant: M = scalar*M (flat pass over storage) */
void matrix_scale(mat *M, double scalar){
    int i;
    int total = (M->nrows)*(M->ncols);
    #pragma omp parallel for shared(M)
    for(i=0; i<total; i++){
        M->d[i] *= scalar;
    }
}
/* copy contents of vec s to d; d must already have s->nrows entries */
void vector_copy(vec *d, vec *s){
    int i;
    #pragma omp parallel for shared(d,s)
    for(i=0; i<(s->nrows); i++){
        d->d[i] = s->d[i];
    }
}
/* copy contents of mat S to D; D must already match S's dimensions */
void matrix_copy(mat *D, mat *S){
    int i;
    int total = (S->nrows)*(S->ncols);
    #pragma omp parallel for shared(D,S)
    for(i=0; i<total; i++){
        D->d[i] = S->d[i];
    }
}
/* hard threshold matrix entries: zero out entries with |entry| < TOL */
void matrix_hard_threshold(mat *M, double TOL){
    int i;
    int total = (M->nrows)*(M->ncols);
    #pragma omp parallel for shared(M)
    for(i=0; i<total; i++){
        if(fabs(M->d[i]) < TOL)
            M->d[i] = 0;
    }
}
/* build transpose of matrix : Mt = M^T; Mt must be ncols x nrows */
void matrix_build_transpose(mat *Mt, mat *M){
    int row, col;
    for(row=0; row<(M->nrows); row++){
        for(col=0; col<(M->ncols); col++){
            double mval = matrix_get_element(M,row,col);
            matrix_set_element(Mt,col,row,mval);
        }
    }
}
/* Debug variant of matrix_build_transpose: traces row progress on
   stdout when mark==3. */
void matrix_build_transpose_debug(mat *Mt, mat *M, int mark){
    int i,j;
    printf("enter\n");
    for(i=0; i<(M->nrows); i++){
        if (mark==3) printf("i=%d\n", i);
        for(j=0; j<(M->ncols); j++){
            double mval = matrix_get_element(M,i,j);
            matrix_set_element(Mt,j,i,mval);
        }
    }
}
/* subtract b from a and save result in a: a = a - b */
void vector_sub(vec *a, vec *b){
    int i;
    #pragma omp parallel for shared(a,b)
    for(i=0; i<(a->nrows); i++){
        a->d[i] -= b->d[i];
    }
}
/* subtract B from A and save result in A: A = A - B */
void matrix_sub(mat *A, mat *B){
    int i;
    int total = (A->nrows)*(A->ncols);
    #pragma omp parallel for shared(A,B)
    for(i=0; i<total; i++){
        A->d[i] -= B->d[i];
    }
}
/* A = A - u*v where u is a column vec and v is a row vec
   (rank-one update; rows are processed in parallel). */
void matrix_sub_column_times_row_vector(mat *A, vec *u, vec *v){
    int i,j;
    #pragma omp parallel for shared(A,u,v) private(j)
    for(i=0; i<(A->nrows); i++){
        double ui = vector_get_element(u,i);
        for(j=0; j<(A->ncols); j++){
            double updated = matrix_get_element(A,i,j) - ui*vector_get_element(v,j);
            matrix_set_element(A,i,j,updated);
        }
    }
}
/* compute euclidean norm of vector: sqrt(sum v_i^2) */
double vector_get2norm(vec *v){
    int i;
    double normval = 0;
    #pragma omp parallel for shared(v) reduction(+:normval)
    for(i=0; i<(v->nrows); i++){
        double entry = v->d[i];
        normval += entry*entry;
    }
    return sqrt(normval);
}
/* returns the dot product of two vectors: sum u_i * v_i */
double vector_dot_product(vec *u, vec *v){
    int i;
    double dotval = 0;
    #pragma omp parallel for shared(u,v) reduction(+:dotval)
    for(i=0; i<u->nrows; i++){
        dotval += (u->d[i])*(v->d[i]);
    }
    return dotval;
}
/* matrix frobenius norm: sqrt(sum of squared entries) */
double get_matrix_frobenius_norm(mat *M){
    int i;
    double normval = 0;
    int total = (M->nrows)*(M->ncols);
    #pragma omp parallel for shared(M) reduction(+:normval)
    for(i=0; i<total; i++){
        double entry = M->d[i];
        normval += entry*entry;
    }
    return sqrt(normval);
}
/* matrix max abs val: returns max_i |M->d[i]| (always >= 0).
   BUG FIX: the original stored the signed value after comparing
   absolute values, so e.g. entries {-5, 1} returned 1 instead of 5
   (the negative candidate poisoned later comparisons). */
double get_matrix_max_abs_element(mat *M){
    int i;
    double val, max = 0;
    for(i=0; i<((M->nrows)*(M->ncols)); i++){
        val = fabs(M->d[i]);
        if( val > max )
            max = val;
    }
    return max;
}
/* calculate percent error between A and B
in terms of Frobenius norm: 100*norm(A - B)/norm(A)
   (norm(B) was computed in the original but never used; dropped). */
double get_percent_error_between_two_mats(mat *A, mat *B){
    int m,n;
    double normA, normA_minus_B;
    mat *A_minus_B;
    m = A->nrows;
    n = A->ncols;
    /* form the difference A - B in a scratch matrix */
    A_minus_B = matrix_new(m,n);
    matrix_copy(A_minus_B, A);
    matrix_sub(A_minus_B, B);
    normA = get_matrix_frobenius_norm(A);
    normA_minus_B = get_matrix_frobenius_norm(A_minus_B);
    matrix_delete(A_minus_B);
    return 100.0*normA_minus_B/normA;
}
/* Return the squared Euclidean norm of column colnum of M.
   (Unused local n removed.) */
double get_matrix_column_norm_squared(mat *M, int colnum){
    int i, m;
    double val, colnorm;
    m = M->nrows;
    colnorm = 0;
    for(i=0; i<m; i++){
        val = matrix_get_element(M,i,colnum);
        colnorm += val*val;
    }
    return colnorm;
}
/* Return the maximum Euclidean column norm of M.
   BUG FIX: the original shared one col_vec (and vecnorm) across all
   threads of the parallel for, so concurrent matrix_get_col calls
   raced on the same buffer and could report a wrong norm. Each
   thread now owns a private scratch vector. */
double matrix_getmaxcolnorm(mat *M){
    int i;
    int m = M->nrows;
    int n = M->ncols;
    double maxnorm = 0;
    #pragma omp parallel shared(M,maxnorm)
    {
        /* per-thread scratch column and norm */
        vec *col_vec = vector_new(m);
        double vecnorm;
        #pragma omp for
        for(i=0; i<n; i++){
            matrix_get_col(M,i,col_vec);
            vecnorm = vector_get2norm(col_vec);
            #pragma omp critical
            {
                if(vecnorm > maxnorm){
                    maxnorm = vecnorm;
                }
            }
        }
        vector_delete(col_vec);
    }
    return maxnorm;
}
/* Store per-column values in column_norms.
   NOTE(review): despite the name this stores SQUARED column norms
   (it calls get_matrix_column_norm_squared) — confirm callers expect
   that before renaming.
   BUG FIX: the original nested `#pragma omp parallel for` inside a
   `#pragma omp parallel` region, so every outer thread executed the
   entire loop (redundant racing writes); a single combined
   parallel-for does the intended work sharing. */
void compute_matrix_column_norms(mat *M, vec *column_norms){
    int j;
    #pragma omp parallel for shared(column_norms,M)
    for(j=0; j<(M->ncols); j++){
        vector_set_element(column_norms,j, get_matrix_column_norm_squared(M,j));
    }
}
/* initialize a random matrix */
/* Fill M with pseudorandom N(0,1) draws generated by the MKL VSL stream.
   The stream is seeded from the wall clock, so results differ per run. */
void initialize_random_matrix(mat *M){
    int i,m,n;
    double val;
    m = M->nrows;
    n = M->ncols;
    float a=0.0,sigma=1.0;
    int N = m*n;
    float *r;
    VSLStreamStatePtr stream;
    r = (float*)malloc(N*sizeof(float));
    vslNewStream( &stream, BRNG, time(NULL) );
    vsRngGaussian( METHOD, stream, N, r, a, sigma );
    // copy the float draws into the (double) matrix storage
    #pragma omp parallel shared(M,N,r) private(i,val)
    {
        /* BUG FIX: was a nested "#pragma omp parallel for" (each thread ran
           the whole loop); "omp for" splits iterations across the team. */
        #pragma omp for
        for(i=0; i<N; i++){
            val = r[i];
            M->d[i] = val;
        }
    }
    /* BUG FIX: the VSL stream was never released (resource leak) */
    vslDeleteStream( &stream );
    free(r);
}
/* initialize diagonal matrix from vector data */
void initialize_diagonal_matrix(mat *D, vec *data){
int i;
#pragma omp parallel shared(D) private(i)
{
#pragma omp parallel for
for(i=0; i<(D->nrows); i++){
matrix_set_element(D,i,i,data->d[i]);
}
}
}
/* initialize identity */
void initialize_identity_matrix(mat *D){
int i;
matrix_scale(D, 0);
#pragma omp parallel shared(D) private(i)
{
#pragma omp parallel for
for(i=0; i<(D->nrows); i++){
matrix_set_element(D,i,i,1.0);
}
}
}
/* invert diagonal matrix */
void invert_diagonal_matrix(mat *Dinv, mat *D){
int i;
#pragma omp parallel shared(D,Dinv) private(i)
{
#pragma omp parallel for
for(i=0; i<(D->nrows); i++){
matrix_set_element(Dinv,i,i,1.0/(matrix_get_element(D,i,i)));
}
}
}
/* C = A*B ; column major */
void matrix_matrix_mult(mat *A, mat *B, mat *C){
double alpha, beta;
alpha = 1.0; beta = 0.0;
//cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows);
}
/* C = A^T*B ; column major */
void matrix_transpose_matrix_mult(mat *A, mat *B, mat *C){
double alpha, beta;
alpha = 1.0; beta = 0.0;
//cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows);
}
/* C = A*B^T ; column major */
void matrix_matrix_transpose_mult(mat *A, mat *B, mat *C){
double alpha, beta;
alpha = 1.0; beta = 0.0;
//cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, A->nrows, B->nrows, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, A->nrows, B->nrows, A->ncols, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows);
}
/* y = M*x ; column major */
void matrix_vector_mult(mat *M, vec *x, vec *y){
double alpha, beta;
alpha = 1.0; beta = 0.0;
cblas_dgemv (CblasColMajor, CblasNoTrans, M->nrows, M->ncols, alpha, M->d, M->nrows, x->d, 1, beta, y->d, 1);
}
/* y = M^T*x ; column major */
void matrix_transpose_vector_mult(mat *M, vec *x, vec *y){
double alpha, beta;
alpha = 1.0; beta = 0.0;
cblas_dgemv (CblasColMajor, CblasTrans, M->nrows, M->ncols, alpha, M->d, M->nrows, x->d, 1, beta, y->d, 1);
}
/* set column of matrix to vector */
void matrix_set_col(mat *M, int j, vec *column_vec){
int i;
#pragma omp parallel shared(column_vec,M,j) private(i)
{
#pragma omp for
for(i=0; i<M->nrows; i++){
matrix_set_element(M,i,j,vector_get_element(column_vec,i));
}
}
}
/* extract column of a matrix into a vector */
void matrix_get_col(mat *M, int j, vec *column_vec){
int i;
#pragma omp parallel shared(column_vec,M,j) private(i)
{
#pragma omp parallel for
for(i=0; i<M->nrows; i++){
vector_set_element(column_vec,i,matrix_get_element(M,i,j));
}
}
}
/* extract row i of a matrix into a vector */
void matrix_get_row(mat *M, int i, vec *row_vec){
int j;
#pragma omp parallel shared(row_vec,M,i) private(j)
{
#pragma omp parallel for
for(j=0; j<M->ncols; j++){
vector_set_element(row_vec,j,matrix_get_element(M,i,j));
}
}
}
/* put vector row_vec as row i of a matrix */
void matrix_set_row(mat *M, int i, vec *row_vec){
int j;
#pragma omp parallel shared(row_vec,M,i) private(j)
{
#pragma omp parallel for
for(j=0; j<M->ncols; j++){
matrix_set_element(M,i,j,vector_get_element(row_vec,j));
}
}
}
/* Mc = M(:,inds) */
/*void matrix_get_selected_columns(mat *M, int *inds, mat *Mc){
int i;
vec *col_vec = vector_new(M->nrows);
for(i=0; i<(Mc->ncols); i++){
matrix_get_col(M,inds[i],col_vec);
matrix_set_col(Mc,i,col_vec);
}
vector_delete(col_vec);
}*/
/* Mc = M(:,inds) */
/* Mc = M(:,inds) : copy the Mc->ncols columns of M listed in inds into Mc. */
void matrix_get_selected_columns(mat *M, int *inds, mat *Mc){
    int i;
    vec *col_vec;
    #pragma omp parallel shared(M,Mc,inds) private(i,col_vec)
    {
        /* BUG FIX: was "#pragma omp parallel for" (nested region; every thread
           ran the whole loop). col_vec is private, so each thread keeps its
           own per-iteration buffer. */
        #pragma omp for
        for(i=0; i<(Mc->ncols); i++){
            col_vec = vector_new(M->nrows);
            matrix_get_col(M,inds[i],col_vec);
            matrix_set_col(Mc,i,col_vec);
            vector_delete(col_vec);
        }
    }
}
/* M(:,inds) = Mc */
/*void matrix_set_selected_columns(mat *M, int *inds, mat *Mc){
int i;
vec *col_vec = vector_new(M->nrows);
for(i=0; i<(Mc->ncols); i++){
matrix_get_col(Mc,i,col_vec);
matrix_set_col(M,inds[i],col_vec);
}
vector_delete(col_vec);
}*/
/* M(:,inds) = Mc */
/* M(:,inds) = Mc : scatter the columns of Mc into M at positions inds. */
void matrix_set_selected_columns(mat *M, int *inds, mat *Mc){
    int i;
    vec *col_vec;
    #pragma omp parallel shared(M,Mc,inds) private(i,col_vec)
    {
        /* BUG FIX: was "#pragma omp parallel for" (nested region; every thread
           ran the whole loop). "omp for" shares iterations across the team. */
        #pragma omp for
        for(i=0; i<(Mc->ncols); i++){
            col_vec = vector_new(M->nrows);
            matrix_get_col(Mc,i,col_vec);
            matrix_set_col(M,inds[i],col_vec);
            vector_delete(col_vec);
        }
    }
}
/* Mr = M(inds,:) */
/*void matrix_get_selected_rows(mat *M, int *inds, mat *Mr){
int i;
vec *row_vec = vector_new(M->ncols);
for(i=0; i<(Mr->nrows); i++){
matrix_get_row(M,inds[i],row_vec);
matrix_set_row(Mr,i,row_vec);
}
vector_delete(row_vec);
}*/
/* Mr = M(inds,:) */
/* Mr = M(inds,:) : gather the Mr->nrows rows of M listed in inds into Mr. */
void matrix_get_selected_rows(mat *M, int *inds, mat *Mr){
    int i;
    vec *row_vec;
    #pragma omp parallel shared(M,Mr,inds) private(i,row_vec)
    {
        /* BUG FIX: was "#pragma omp parallel for" (nested region; every thread
           ran the whole loop). "omp for" shares iterations across the team. */
        #pragma omp for
        for(i=0; i<(Mr->nrows); i++){
            row_vec = vector_new(M->ncols);
            matrix_get_row(M,inds[i],row_vec);
            matrix_set_row(Mr,i,row_vec);
            vector_delete(row_vec);
        }
    }
}
/* M(inds,:) = Mr */
/*void matrix_set_selected_rows(mat *M, int *inds, mat *Mr){
int i;
vec *row_vec = vector_new(M->ncols);
for(i=0; i<(Mr->nrows); i++){
matrix_get_row(Mr,i,row_vec);
matrix_set_row(M,inds[i],row_vec);
}
vector_delete(row_vec);
}*/
/* M(inds,:) = Mr */
/* M(inds,:) = Mr : scatter the rows of Mr into M at positions inds. */
void matrix_set_selected_rows(mat *M, int *inds, mat *Mr){
    int i;
    vec *row_vec;
    #pragma omp parallel shared(M,Mr,inds) private(i,row_vec)
    {
        /* BUG FIX: was "#pragma omp parallel for" (nested region; every thread
           ran the whole loop). "omp for" shares iterations across the team. */
        #pragma omp for
        for(i=0; i<(Mr->nrows); i++){
            row_vec = vector_new(M->ncols);
            matrix_get_row(Mr,i,row_vec);
            matrix_set_row(M,inds[i],row_vec);
            vector_delete(row_vec);
        }
    }
}
/* copy only upper triangular matrix part as for symmetric matrix */
void matrix_copy_symmetric(mat *S, mat *M){
int i,j,n,m;
m = M->nrows;
n = M->ncols;
for(i=0; i<m; i++){
for(j=0; j<n; j++){
if(j>=i){
matrix_set_element(S,i,j,matrix_get_element(M,i,j));
}
}
}
}
/* copy only upper triangular matrix part as for symmetric matrix */
void matrix_keep_only_upper_triangular(mat *M){
int i,j,n,m;
m = M->nrows;
n = M->ncols;
for(i=0; i<m; i++){
for(j=0; j<n; j++){
if(j<i){
matrix_set_element(M,i,j,0);
}
}
}
}
/*
% project v in direction of u
function p=project_vec(v,u)
p = (dot(v,u)/norm(u)^2)*u;
*/
/* p = (dot(v,u)/||u||^2) * u : orthogonal projection of v onto the direction
of u. p must be pre-allocated with the same length as u; u is not modified.
NOTE(review): no guard against u being the zero vector (division by zero). */
void project_vector(vec *v, vec *u, vec *p){
double dot_product_val, vec_norm, scalar_val;
dot_product_val = vector_dot_product(v, u);
vec_norm = vector_get2norm(u);
/* scale factor dot(v,u)/norm(u)^2 */
scalar_val = dot_product_val/(vec_norm*vec_norm);
vector_copy(p, u);
vector_scale(p, scalar_val);
}
/* build orthonormal basis matrix
Q = Y;
for j=1:k
vj = Q(:,j);
for i=1:(j-1)
vi = Q(:,i);
vj = vj - project_vec(vj,vi);
end
vj = vj/norm(vj);
Q(:,j) = vj;
end
*/
/* Orthonormalize the columns of A into Q (same dimensions) using classical
Gram-Schmidt, repeated num_ortos times (re-orthogonalization sweeps) to
reduce loss of orthogonality in floating point. */
void build_orthonormal_basis_from_mat(mat *A, mat *Q){
int m,n,i,j,ind,num_ortos=2;
double vec_norm;
vec *vi,*vj,*p;
m = A->nrows;
n = A->ncols;
vi = vector_new(m);
vj = vector_new(m);
p = vector_new(m);
matrix_copy(Q, A);
/* two full sweeps over all columns */
for(ind=0; ind<num_ortos; ind++){
for(j=0; j<n; j++){
matrix_get_col(Q, j, vj);
/* subtract the projection onto each previously processed column */
for(i=0; i<j; i++){
matrix_get_col(Q, i, vi);
project_vector(vj, vi, p);
vector_sub(vj, p);
}
/* normalize; NOTE(review): divides by zero if a column is numerically
dependent on the earlier ones (vec_norm == 0) */
vec_norm = vector_get2norm(vj);
vector_scale(vj, 1.0/vec_norm);
matrix_set_col(Q, j, vj);
}
}
vector_delete(vi);
vector_delete(vj);
vector_delete(p);
}
/* output = input[inds] */
/* output[i] = input[inds[i]] : gather entries of input at the positions
   stored (as integer-valued doubles) in inds. output must be pre-allocated
   with at least input->nrows entries. */
void fill_vector_from_row_list(vec *input, vec *inds, vec *output){
    int i;
    /* removed unused local col_num; the cast makes the double->int index
       conversion explicit instead of implicit */
    for(i=0; i<(input->nrows); i++){
        vector_set_element(output,i,vector_get_element(input,(int)vector_get_element(inds,i)));
    }
}
/* copy the first k rows of M into M_out where k = M_out->nrows (M_out pre-initialized) */
void matrix_copy_first_rows(mat *M_out, mat *M){
int i,k;
k = M_out->nrows;
vec * row_vec;
for(i=0; i<k; i++){
row_vec = vector_new(M->ncols);
matrix_get_row(M,i,row_vec);
matrix_set_row(M_out,i,row_vec);
vector_delete(row_vec);
}
}
/* copy the first k columns of M into M_out where k = M_out->ncols (M_out pre-initialized) */
void matrix_copy_first_columns(mat *M_out, mat *M){
int i,k;
k = M_out->ncols;
vec * col_vec;
for(i=0; i<k; i++){
col_vec = vector_new(M->nrows);
matrix_get_col(M,i,col_vec);
matrix_set_col(M_out,i,col_vec);
vector_delete(col_vec);
}
}
/* copy contents of mat S to D */
void matrix_copy_first_columns_with_param(mat *D, mat *S, int num_columns){
int i,j;
for(i=0; i<(S->nrows); i++){
for(j=0; j<num_columns; j++){
matrix_set_element(D,i,j,matrix_get_element(S,i,j));
}
}
}
/* copy the first k rows and columns of M into M_out is kxk where k = M_out->ncols (M_out pre-initialized)
M_out = M(1:k,1:k) */
/* M_out = M(1:k,1:k) where k = M_out->ncols (M_out pre-initialized k x k). */
void matrix_copy_first_k_rows_and_columns(mat *M_out, mat *M){
    int i,j,k;
    k = M_out->ncols;
    /* removed unused local col_vec */
    for(i=0; i<k; i++){
        for(j=0; j<k; j++){
            matrix_set_element(M_out,i,j,matrix_get_element(M,i,j));
        }
    }
}
/* M_out = M(:,k+1:end) */
void matrix_copy_all_rows_and_last_columns_from_indexk(mat *M_out, mat *M, int k){
int i,j,i_out,j_out;
vec * col_vec;
for(i=0; i<(M->nrows); i++){
for(j=k; j<(M->ncols); j++){
i_out = i; j_out = j - k;
matrix_set_element(M_out,i_out,j_out,matrix_get_element(M,i,j));
}
}
}
/* M_k = M(1:k,:) : copy the first k rows of M into M_k (pre-allocated). */
void fill_matrix_from_first_rows(mat *M, int k, mat *M_k){
    int i;
    /* hoisted out of the loop: one reusable buffer instead of a
       malloc/free pair per row */
    vec *row_vec = vector_new(M->ncols);
    for(i=0; i<k; i++){
        matrix_get_row(M,i,row_vec);
        matrix_set_row(M_k,i,row_vec);
    }
    vector_delete(row_vec);
}
/* M_k = M(:,1:k) : copy the first k columns of M into M_k (pre-allocated). */
void fill_matrix_from_first_columns(mat *M, int k, mat *M_k){
    int i;
    /* hoisted out of the loop: one reusable buffer instead of a
       malloc/free pair per column */
    vec *col_vec = vector_new(M->nrows);
    for(i=0; i<k; i++){
        matrix_get_col(M,i,col_vec);
        matrix_set_col(M_k,i,col_vec);
    }
    vector_delete(col_vec);
}
/* M_k = M(:,k+1:end) : copy columns k..ncols-1 of M into M_k starting at
   column 0 (M_k pre-allocated). */
void fill_matrix_from_last_columns(mat *M, int k, mat *M_k){
    int i,ind;
    /* hoisted out of the loop: one reusable buffer instead of a
       malloc/free pair per column */
    vec *col_vec = vector_new(M->nrows);
    ind = 0;
    for(i=k; i<M->ncols; i++){
        matrix_get_col(M,i,col_vec);
        matrix_set_col(M_k,ind,col_vec);
        ind++;
    }
    vector_delete(col_vec);
}
/* Mout = M((k+1):end,(k+1):end) in matlab notation */
void fill_matrix_from_lower_right_corner(mat *M, int k, mat *M_out){
int i,j,i_out,j_out;
for(i=k; i<M->nrows; i++){
for(j=k; j<M->ncols; j++){
i_out = i-k;
j_out = j-k;
//printf("setting element %d, %d of M_out\n", i_out, j_out);
matrix_set_element(M_out,i_out,j_out,matrix_get_element(M,i,j));
}
}
}
/* append matrices side by side: C = [A, B] */
/* C = [A, B]: raw block copy of A's storage followed by B's storage.
NOTE(review): this is only a horizontal concatenation because the library
stores matrices contiguously in column-major order (see the CblasColMajor
GEMM wrappers); it assumes A->nrows == B->nrows == C->nrows -- confirm. */
void append_matrices_horizontally(mat *A, mat *B, mat *C){
int i,j;
/* copy A's data into the leading A->nrows*A->ncols entries of C */
#pragma omp parallel shared(C,A) private(i)
{
#pragma omp for
for(i=0; i<((A->nrows)*(A->ncols)); i++){
C->d[i] = A->d[i];
}
}
/* copy B's data immediately after A's block */
#pragma omp parallel shared(C,B,A) private(i)
{
#pragma omp for
for(i=0; i<((B->nrows)*(B->ncols)); i++){
C->d[i + (A->nrows)*(A->ncols)] = B->d[i];
}
}
/*
for(i=0; i<A->nrows; i++){
for(j=0; j<A->ncols; j++){
matrix_set_element(C,i,j,matrix_get_element(A,i,j));
}
}
for(i=0; i<B->nrows; i++){
for(j=0; j<B->ncols; j++){
matrix_set_element(C,i,A->ncols + j,matrix_get_element(B,i,j));
}
}*/
}
/* append matrices vertically: C = [A; B] */
void append_matrices_vertically(mat *A, mat *B, mat *C){
int i,j;
for(i=0; i<A->nrows; i++){
for(j=0; j<A->ncols; j++){
matrix_set_element(C,i,j,matrix_get_element(A,i,j));
}
}
for(i=0; i<B->nrows; i++){
for(j=0; j<B->ncols; j++){
matrix_set_element(C,A->nrows+i,j,matrix_get_element(B,i,j));
}
}
}
/* compute eigendecomposition of symmetric matrix M
*/
/* Symmetric eigendecomposition via LAPACK dsyev: on exit S is overwritten
with the eigenvectors ('V') and evals receives the eigenvalues (ascending,
per the dsyev convention). Only the upper triangle ('U') of S is read.
NOTE(review): the leading dimension is passed as S->ncols; for column-major
storage it should be the row count -- correct only for square S, confirm. */
void compute_evals_and_evecs_of_symm_matrix(mat *S, vec *evals){
//LAPACKE_dsyev( LAPACK_ROW_MAJOR, 'V', 'U', S->nrows, S->d, S->nrows, evals->d);
LAPACKE_dsyev( LAPACK_COL_MAJOR, 'V', 'U', S->nrows, S->d, S->ncols, evals->d);
}
/* Performs [Q,R] = qr(M,'0') compact QR factorization
M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */
/* Compact QR, [Q,R] = qr(M,'0'): M is m x n, Q is m x n orthonormal,
R is min(m,n) x min(m,n) upper triangular. M itself is not modified. */
void compact_QR_factorization(mat *M, mat *Q, mat *R){
int i,j,m,n,k;
m = M->nrows; n = M->ncols;
k = min(m,n);
// printf("doing QR with m = %d, n = %d, k = %d\n", m,n,k);
/* work on a copy; dgeqrf overwrites its input with the Householder factors */
mat *R_full = matrix_new(m,n);
matrix_copy(R_full,M);
//vec *tau = vector_new(n);
/* tau holds the k Householder scalar factors */
vec *tau = vector_new(k);
// get R
//printf("get R..\n");
//LAPACKE_dgeqrf(CblasColMajor, m, n, R_full->d, n, tau->d);
LAPACKE_dgeqrf(LAPACK_COL_MAJOR, R_full->nrows, R_full->ncols, R_full->d, R_full->nrows, tau->d);
/* copy the upper-triangular k x k block of the factored matrix into R */
for(i=0; i<k; i++){
for(j=0; j<k; j++){
if(j>=i){
matrix_set_element(R,i,j,matrix_get_element(R_full,i,j));
}
}
}
// get Q
/* dorgqr expands the reflectors stored in R_full into the explicit Q */
matrix_copy(Q,R_full);
//printf("dorgqr..\n");
LAPACKE_dorgqr(LAPACK_COL_MAJOR, Q->nrows, Q->ncols, min(Q->ncols,Q->nrows), Q->d, Q->nrows, tau->d);
// clean up
matrix_delete(R_full);
vector_delete(tau);
}
/* returns Q from [Q,R] = qr(M,'0') compact QR factorization
M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */
/* Compute only the Q factor of the compact QR of M (m x n) into Q (m x n).
M is not modified; Q must be pre-allocated with the same dimensions. */
void QR_factorization_getQ(mat *M, mat *Q){
int i,j,m,n,k;
m = M->nrows; n = M->ncols;
k = min(m,n);
matrix_copy(Q,M);
/* tau holds the k Householder scalar factors produced by dgeqrf */
vec *tau = vector_new(k);
LAPACKE_dgeqrf(LAPACK_COL_MAJOR, m, n, Q->d, m, tau->d);
/* NOTE(review): dorgqr is asked for n reflectors, not k = min(m,n);
this is valid only when m >= n -- confirm callers never pass wide M */
LAPACKE_dorgqr(LAPACK_COL_MAJOR, m, n, n, Q->d, m, tau->d);
// clean up
vector_delete(tau);
}
/* Overwrite Q (m x n) in place with the Q factor of its own QR factorization.
Uses the column-pivoted dgeqpf instead of dgeqrf as a workaround (see the
bug note below); the pivot array jpvt is computed but otherwise unused. */
void QR_factorization_getQ_inplace(mat *Q) {
// printf("k1\n");
int i,j,m,n,k;
m = Q->nrows; n = Q->ncols;
k = min(m,n);
/* pivot indices required by dgeqpf; discarded after the call */
int *jpvt = (int*)malloc(n*sizeof(int));
vec *tau = vector_new(k);
// check memory allocation
// printf("k1b\n");
// for (i=0; i++; i<m) {
// for (j=0; j++; j<n) {
// matrix_set_element(Q, i, j, matrix_get_element(Q, i, j));
// }
// }
/*
BUG DETECTED! the dgeqrf call raises segmentation fault occasionally.
the arguments passed to it seems to be fine. probably it's due to bug
internal to MKL.
To reproduce the bug: call qr_bug_reproduce() in main.c
*/
// printf("k2 m=%d,n=%d,size=%d,tau=%d\n", m, n, sizeof(Q->d), k);
// LAPACKE_dgeqrf(LAPACK_COL_MAJOR, m, n, Q->d, m, tau->d);
LAPACKE_dgeqpf(LAPACK_COL_MAJOR, m, n, Q->d, m, jpvt, tau->d);
// printf("k2b\n");
/* NOTE(review): n reflectors requested -- assumes m >= n, as in
QR_factorization_getQ above; confirm */
LAPACKE_dorgqr(LAPACK_COL_MAJOR, m, n, n, Q->d, m, tau->d);
// printf("k3\n");
// clean up
vector_delete(tau);
free(jpvt);
// printf("k4\n");
}
/* computes SVD: M = U*S*Vt; note Vt = V^T */
/* Economy SVD via LAPACK dgesvd: M (m x n) = U*S*Vt with k = min(m,n).
U must be m x k, S k x k (filled as diag of the singular values), Vt k x n.
WARNING: M is overwritten by dgesvd. */
void singular_value_decomposition(mat *M, mat *U, mat *S, mat *Vt){
int m,n,k;
m = M->nrows; n = M->ncols;
k = min(m,n);
/* workspace sized from the dgesvd minimum-workspace formula (doubled) */
vec * work = vector_new(2*max(3*min(m, n)+max(m, n), 5*min(m,n)));
vec * svals = vector_new(k);
/* 'S','S' requests the thin (economy) U and Vt factors */
LAPACKE_dgesvd( LAPACK_COL_MAJOR, 'S', 'S', m, n, M->d, m, svals->d, U->d, m, Vt->d, k, work->d );
/* S = diag(svals) */
initialize_diagonal_matrix(S, svals);
vector_delete(work);
vector_delete(svals);
}
/* P = U*S*V^T : reassemble a matrix from its SVD factors.
   P must be pre-allocated; S is k x k where k = S->nrows. */
void form_svd_product_matrix(mat *U, mat *S, mat *V, mat *P){
    int k,n;
    n = P->ncols;
    k = S->nrows;
    /* removed unused locals m, alpha, beta */
    mat * SVt = matrix_new(k,n);
    // form SVt = S*V^T
    matrix_matrix_transpose_mult(S,V,SVt);
    // form P = U*S*V^T
    matrix_matrix_mult(U,SVt,P);
    /* BUG FIX: the temporary SVt was leaked */
    matrix_delete(SVt);
}
/* Estimate the numerical rank of M (up to frac_of_max_rank*min(m,n)) with a
   randomized range sampler and Gram-Schmidt, then build an orthonormal basis
   *Q (allocated here, m x good_rank) for the sampled range. */
void estimate_rank_and_buildQ(mat *M, double frac_of_max_rank, double TOL, mat **Q, int *good_rank){
    int m,n,i,j,ind,maxdim;
    double vec_norm;
    mat *RN,*Y,*Qbig,*Qsmall;
    vec *vi,*vj,*p,*p1;
    m = M->nrows;
    n = M->ncols;
    maxdim = round(min(m,n)*frac_of_max_rank);
    vi = vector_new(m);
    vj = vector_new(m);
    p = vector_new(m);
    /* p1 tracks the previous projection; NOTE(review): it is compared against
       TOL before it is first written -- relies on vector_new's initial
       contents, confirm vector_new zero-initializes */
    p1 = vector_new(m);
    // build random matrix
    printf("form RN..\n");
    RN = matrix_new(n, maxdim);
    initialize_random_matrix(RN);
    // multiply to get matrix of random samples Y
    printf("form Y: %d x %d..\n",m,maxdim);
    Y = matrix_new(m, maxdim);
    matrix_matrix_mult(M, RN, Y);
    // estimate rank k and build Q from Y
    printf("form Qbig..\n");
    Qbig = matrix_new(m, maxdim);
    matrix_copy(Qbig, Y);
    printf("estimate rank with TOL = %f..\n", TOL);
    *good_rank = maxdim;
    int forbreak = 0;
    /* Gram-Schmidt; stop once two consecutive projections fall below TOL */
    for(j=0; !forbreak && j<maxdim; j++){
        matrix_get_col(Qbig, j, vj);
        for(i=0; i<j; i++){
            matrix_get_col(Qbig, i, vi);
            project_vector(vj, vi, p);
            vector_sub(vj, p);
            if(vector_get2norm(p) < TOL && vector_get2norm(p1) < TOL){
                *good_rank = j;
                forbreak = 1;
                break;
            }
            vector_copy(p1,p);
        }
        vec_norm = vector_get2norm(vj);
        vector_scale(vj, 1.0/vec_norm);
        matrix_set_col(Qbig, j, vj);
    }
    printf("estimated rank = %d\n", *good_rank);
    Qsmall = matrix_new(m, *good_rank);
    *Q = matrix_new(m, *good_rank);
    matrix_copy_first_columns(Qsmall, Qbig);
    QR_factorization_getQ(Qsmall, *Q);
    matrix_delete(RN);
    matrix_delete(Y);
    matrix_delete(Qsmall);
    matrix_delete(Qbig);
    /* BUG FIX: the four work vectors were leaked */
    vector_delete(vi);
    vector_delete(vj);
    vector_delete(p);
    vector_delete(p1);
}
/* Adaptive randomized rank estimation: grow the sample matrix *Y by kblock
   columns at a time until ||Q*Q^T*M - M|| / ||M|| <= TOL, returning the
   sampled range *Y, its orthonormal basis *Q, and the rank estimate. */
void estimate_rank_and_buildQ2(mat *M, int kblock, double TOL, mat **Y, mat **Q, int *good_rank){
    int m,n,ind,exit_loop = 0;
    double error_norm;
    mat *RN,*Y_new,*Y_big,*QtM,*QQtM;
    /* removed unused locals i, j, vi, vj, p, p1 */
    m = M->nrows;
    n = M->ncols;
    // build random matrix
    printf("form RN..\n");
    RN = matrix_new(n,kblock);
    initialize_random_matrix(RN);
    // multiply to get matrix of random samples Y
    printf("form Y: %d x %d..\n",m,kblock);
    *Y = matrix_new(m, kblock);
    matrix_matrix_mult(M, RN, *Y);
    ind = 0;
    while(!exit_loop){
        printf("form Q..\n");
        if(ind > 0){
            matrix_delete(*Q);
        }
        *Q = matrix_new((*Y)->nrows, (*Y)->ncols);
        QR_factorization_getQ(*Y, *Q);
        // compute QtM
        QtM = matrix_new((*Q)->ncols, M->ncols);
        matrix_transpose_matrix_mult(*Q,M,QtM);
        // compute QQtM
        QQtM = matrix_new(M->nrows, M->ncols);
        matrix_matrix_mult(*Q,QtM,QQtM);
        error_norm = 0.01*get_percent_error_between_two_mats(QQtM, M);
        printf("Y is of size %d x %d and error_norm = %f\n", (*Y)->nrows, (*Y)->ncols, error_norm);
        *good_rank = (*Y)->ncols;
        // add more samples if needed
        if(error_norm > TOL){
            Y_new = matrix_new(m, kblock);
            initialize_random_matrix(RN);
            matrix_matrix_mult(M, RN, Y_new);
            Y_big = matrix_new((*Y)->nrows, (*Y)->ncols + Y_new->ncols);
            append_matrices_horizontally(*Y, Y_new, Y_big);
            matrix_delete(*Y);
            *Y = matrix_new(Y_big->nrows,Y_big->ncols);
            matrix_copy(*Y,Y_big);
            matrix_delete(Y_big);
            matrix_delete(Y_new);
            matrix_delete(QtM);
            matrix_delete(QQtM);
            ind++;
        }
        else{
            /* BUG FIX: QtM and QQtM were leaked on this exit path */
            matrix_delete(QtM);
            matrix_delete(QQtM);
            matrix_delete(RN);
            exit_loop = 1;
        }
    }
}
/* Elapsed time between two gettimeofday samples, in fractional seconds. */
double get_seconds_frac(struct timeval start_timeval, struct timeval end_timeval){
    long sec_delta = end_timeval.tv_sec - start_timeval.tv_sec;
    long micros = sec_delta * 1000000 + end_timeval.tv_usec - start_timeval.tv_usec;
    return micros / 1e6;
}
/*********************Lijian***********************/
/* C = A*B using row-major storage (cblas_dgemm wrapper) */
/* C = A*B using ROW-major dgemm (leading dimensions are the column counts). */
void matrix_matrix_mult_row(mat *A, mat* B, mat* C){
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
        A->nrows, B->ncols, A->ncols,
        1.0, A->d, A->ncols, B->d, B->ncols,
        0.0, C->d, C->ncols);
}
/* C = A^T * B using ROW-major dgemm (leading dimensions are column counts). */
void matrix_transpose_matrix_mult_row(mat *A, mat* B, mat* C){
    cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
        A->ncols, B->ncols, A->nrows,
        1.0, A->d, A->ncols, B->d, B->ncols,
        0.0, C->d, C->ncols);
}
/*********************Lijian***********************/
|
helloworld.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* OpenMP demo: every thread prints a greeting; thread 0 also reports
   the size of the team. */
int main (int argc, char *argv[])
{
    int nthreads, tid;
    // nthreads and tid are private: one copy per thread
    #pragma omp parallel private(nthreads, tid)
    {
        tid = omp_get_thread_num();
        printf("'Hello, World!' from thread %d\n", tid);
        // only the master thread reports the team size
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    } // End of parallel region
    return 0;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values, normalizing so that
   result->tv_usec is non-negative. Y is modified (used as scratch).
   Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* borrow whole seconds into y so that x->tv_usec >= y->tv_usec */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* push excess microseconds back into seconds */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* after normalization tv_usec is certainly positive */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the PLUTO-tiled order-4 (25-point) variable-coefficient 3D
stencil: allocates the double-buffered grid A and the 13 coefficient arrays,
runs the tiled time loop TESTS times, and reports the best wall time. */
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* NOTE(review): Nx/Ny/Nz (and Nt) are read uninitialized below when fewer
than 3 (resp. 4) command-line arguments are given -- undefined behavior;
confirm the benchmark harness always passes all four. */
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* NOTE(review): initialization starts at index 1, so the index-0 planes of
A[0] and coef are never written although the stencil's -4 offsets can reach
index 0 -- confirm this is intended. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* run the kernel TESTS times and keep the best time */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* auto-generated tiled loop nest (do not hand-edit): t1 iterates over time
tiles, t2..t4 over space tiles (parallelized over t2), t5 is the time step,
t6..t8 the intra-tile z/y/x points; A is double-buffered on t5 % 2 */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) {
for (t4=max(max(ceild(t1-6,8),ceild(8*t2-Nz-19,32)),ceild(24*t3-Ny-19,32));t4<=min(min(floord(4*Nt+Nx-9,32),floord(4*t1+Nx-1,32)),floord(24*t3+Nx+11,32));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),8*t4+6);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_unop__atanh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__atanh_fp64_fp64
// op(A') function: GB_unop_tran__atanh_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = atanh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = atanh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = atanh (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATANH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = atanh (Ax [p]) for every entry: dense A when Ab == NULL,
otherwise bitmap A where Ab [p] marks which entries are present.
Auto-generated file -- changes here will be overwritten by the generator. */
GrB_Info GB_unop_apply__atanh_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
/* dense case: apply atanh to all anz entries in parallel */
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = atanh (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = atanh (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = atanh (A'): transpose A and apply the unary op; the actual loop body
lives in the shared template GB_unop_transpose.c, specialized here by the
GB_* macros defined above. Auto-generated file -- do not hand-edit. */
GrB_Info GB_unop_tran__atanh_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
create_dummy_ciphertext.c | #include <ristretto_elgamal.h>
#include <omp.h>
#include <time.h>
#include <stdio.h>
/*
* This executable program creates dummy ciphertexts under a specific pair of public keys
* In order to create the dummy ciphertext for 574-block case,
* the stack size limit may need to be elevated.
*/
/*
 * Create dummy (all-zero plaintext) ciphertexts under the public keys found
 * in the current directory, for block counts 3, 9, 36, and 574, and write
 * the serialized ciphertext/plaintext pairs to ./data/.
 *
 * Fixes over the previous version:
 *   - repaired garbled ANSI escape sequences ("\033\033\033[0m") in two
 *     status messages,
 *   - "%zu" instead of "%ld" for printing a size_t,
 *   - str_pt is now freed each iteration (was leaked),
 *   - the base table st_pk[59] is now released (was leaked),
 *   - fopen("/dev/urandom"), malloc, and fwrite results are checked,
 *   - the decode self-check now clears `recovered` (the buffer being decoded
 *     into) instead of the no-longer-used `input` buffer.
 */
int main() {
    printf("\033[0;32m[INFO]\033[0m Loading public keys from the current directory...\n");
    ristretto255_point_t pk[59];
    LoadPubKey(pk, "./pub.key");
    fastecexp_state st_pk[60];
    char filename[59][150];
    /* Load the 59 per-slot exponentiation tables in parallel. */
#pragma omp parallel for
    for (int i = 0; i < 59; i++) {
        sprintf(filename[i], "/table/pub_%d.tab", i);
        TableLoad(&st_pk[i], filename[i]);
    }
    TableLoad(&st_pk[59], "/table/pub_base.tab");
    printf("\033[0;32m[INFO]\033[0m Public keys loaded.\n");
    /* 1MB => 574, 64KB => 36, 16KB => 9, 4KB => 3 */
    int BLOCK_array[4] = { 3, 9, 36, 574 };
    for (int BLOCK_array_index = 0; BLOCK_array_index < 4; BLOCK_array_index++) {
        int BLOCK = BLOCK_array[BLOCK_array_index];
        printf("\033[0;32m[INFO]\033[0m Preparing a dummy ciphertext of %d blocks.\n", BLOCK);
        /* NOTE: large VLAs; the 574-block case may need an elevated stack limit. */
        uint8_t input[1827 * BLOCK];
        memset(input, 0, sizeof(input));
        FILE *rand_src = fopen("/dev/urandom", "rb");
        if (rand_src == NULL) {
            printf("\033[0;31m[ERROR]\033[0m Failed to open /dev/urandom.\n");
            exit(1);
        }
        /* encode */
        ristretto255_point_t output[59 * BLOCK];
        ristretto_elgamal_encode(output, input, 0, 1827 * BLOCK);
        /* encrypt */
        ristretto255_point_t ct[60 * BLOCK];
        for (int i = 0; i < BLOCK; i++) {
            Encrypt(&ct[i * 60], &output[i * 59], st_pk, rand_src);
        }
        fclose(rand_src);
        /* encode the ciphertext */
        size_t serialized_ct_size = Serialize_Honest_Size(60 * BLOCK);
        unsigned char *str = malloc(serialized_ct_size);
        /* encode the plaintext */
        size_t serialized_pt_size = Serialize_Honest_Size(59 * BLOCK);
        unsigned char *str_pt = malloc(serialized_pt_size);
        if (str == NULL || str_pt == NULL) {
            printf("\033[0;31m[ERROR]\033[0m Out of memory.\n");
            exit(1);
        }
        Serialize_Honest(str, ct, 60 * BLOCK);
        Serialize_Honest(str_pt, output, 59 * BLOCK);
        /* round-trip the plaintext through (de)serialization */
        Deserialize_Honest(output, str_pt, 59 * BLOCK);
        printf("\033[0;32m[INFO]\033[0m Writing the dummy ciphertext of %d blocks to ./data/dummy_ciphertext_%d.\n",
               BLOCK, BLOCK);
        char filename_dummy_ciphertext[150];
        sprintf(filename_dummy_ciphertext, "./data/dummy_ciphertext_%d", BLOCK);
        FILE *fp_dummy_ciphertext = fopen(filename_dummy_ciphertext, "wb");
        if (fp_dummy_ciphertext == NULL) {
            printf("\033[0;31m[ERROR]\033[0m Failed to write the dummy ciphertext.\n");
            exit(1);
        }
        if (fwrite(str, serialized_ct_size, 1, fp_dummy_ciphertext) != 1) {
            printf("\033[0;31m[ERROR]\033[0m Failed to write the dummy ciphertext.\n");
            exit(1);
        }
        fclose(fp_dummy_ciphertext);
        printf("\033[0;32m[INFO]\033[0m Dummy ciphertext of %d blocks written.\n", BLOCK);
        printf("\033[0;32m[INFO]\033[0m Writing the dummy plaintext of %d blocks to ./data/dummy_plaintext_%d.\n",
               BLOCK, BLOCK);
        char filename_dummy_plaintext[150];
        sprintf(filename_dummy_plaintext, "./data/dummy_plaintext_%d", BLOCK);
        FILE *fp_dummy_plaintext = fopen(filename_dummy_plaintext, "wb");
        if (fp_dummy_plaintext == NULL) {
            printf("\033[0;31m[ERROR]\033[0m Failed to write the dummy plaintext.\n");
            exit(1);
        }
        if (fwrite(str_pt, serialized_pt_size, 1, fp_dummy_plaintext) != 1) {
            printf("\033[0;31m[ERROR]\033[0m Failed to write the dummy plaintext.\n");
            exit(1);
        }
        fclose(fp_dummy_plaintext);
        printf("\033[0;32m[INFO]\033[0m Dummy plaintext of %d blocks written.\n", BLOCK);
        printf("\033[0;32m[INFO]\033[0m Checking if the dummy ciphertext can be decrypted correctly...\n");
        uint8_t recovered[1827 * BLOCK];
        size_t actual_size;
        /* Clear the decode target (the original cleared `input`, which is dead here). */
        memset(recovered, 0, sizeof(recovered));
        ristretto_elgamal_decode(recovered, output, 59 * BLOCK, &actual_size, 1827 * BLOCK);
        printf("\033[0;32m[INFO]\033[0m Decrypted plaintext has a size of %zu bytes (expected: 0 bytes).\n",
               actual_size);
        free(str);
        free(str_pt);
    }
    for (int i = 0; i < 59; i++) {
        TableRelease(&st_pk[i]);
    }
    TableRelease(&st_pk[59]);
    return 0;
}
|
volume_raycast_benchmark.h | /// <copyright file="volume_raycast_benchmark.h" company="Visualisierungsinstitut der Universität Stuttgart">
/// Copyright © 2016 - 2018 Visualisierungsinstitut der Universität Stuttgart. Alle Rechte vorbehalten.
/// Licensed under the MIT licence. See LICENCE.txt file in the project root for full licence information.
/// </copyright>
/// <author>Valentin Bruder</author>
#pragma once
#include "trrojan/benchmark.h"
#include "trrojan/camera.h"
#include "trrojan/trackball.h"
#include "trrojan/opencl/export.h"
#include "trrojan/opencl/scalar_type.h"
#include "trrojan/opencl/dat_raw_reader.h"
#include "trrojan/opencl/environment.h"
#include "trrojan/opencl/util.h"
#include "trrojan/enum_parse_helper.h"
#include <unordered_set>
#include <unordered_map>
namespace trrojan
{
namespace opencl
{
/// <summary>
/// The implementation of a basic volume raycasting benchmark.
/// </summary>
/// <remarks>
/// Volume raycasting benchmark with front to back compositing
/// using a 1D transfer function to map density values to color and opacity.
/// Optionally, early ray termination (ERT) and empty space skipping (ESS)
/// are used as acceleration techniques.
/// </remarks>
class TRROJANCL_API volume_raycast_benchmark : public trrojan::benchmark_base
{
public:

    typedef benchmark_base::on_result_callback on_result_callback;

    // TODO remove hard coded paths
    static const std::string kernel_source_path;
    static const std::string kernel_snippet_path;
    static const std::string test_volume;

    // factor strings
    static const std::string factor_environment;
    static const std::string factor_environment_vendor;
    static const std::string factor_device;
    static const std::string factor_device_type;
    static const std::string factor_device_vendor;
    static const std::string factor_iterations;
    static const std::string factor_volume_file_name;
    static const std::string factor_tff_file_name;
    static const std::string factor_viewport;
    static const std::string factor_step_size_factor;
    static const std::string factor_cam_position;
    static const std::string factor_cam_rotation;
    static const std::string factor_maneuver;
    static const std::string factor_maneuver_samples;
    static const std::string factor_maneuver_iteration;
    static const std::string factor_sample_precision;
    static const std::string factor_use_lerp;
    static const std::string factor_use_ERT;
    static const std::string factor_use_ESS;
    static const std::string factor_use_tff;
    static const std::string factor_use_dvr;
    static const std::string factor_shuffle;
    static const std::string factor_use_buffer;
    static const std::string factor_use_illumination;
    static const std::string factor_use_ortho_proj;
    static const std::string factor_img_output;
    static const std::string factor_count_samples;
    static const std::string factor_data_precision;
    static const std::string factor_volume_res_x;
    static const std::string factor_volume_res_y;
    static const std::string factor_volume_res_z;
    static const std::string factor_volume_scaling;

    /// <summary>
    /// Argument indices of the volume raycasting OpenCL kernel.
    /// </summary>
    enum kernel_arg
    {
        VOLUME = 0      // volume data set memory object
        , OUTPUT = 1    // output image memory object
        , TFF           // transfer function memory object
        , VIEW          // view matrix memory object
        , ID            // shuffled ray IDs memory object
        , STEP_SIZE     // step size factor cl_float
        , RESOLUTION    // volume resolution cl_int3
        , SAMPLER       // image data sampler cl::Sampler
        , PRECISION     // precision divisor cl_float
        , MODEL_SCALE   // model scaling (cf. _model_scale member)
        , BRICKS        // brick min/max volume (cf. _brick_mem member)
        , TFF_PREFIX    // transfer function prefix sum (cf. _tff_prefix_mem)
        // , OFFSET     // TODO: ID offset cl_int2
    };

    /// <summary>
    /// Constructor. Default config is defined here.
    /// </summary>
    volume_raycast_benchmark(void);

    /// <summary>
    /// Destructor.
    /// </summary>
    virtual ~volume_raycast_benchmark(void);

    /// <summary>
    /// Overrides benchmark run method.
    /// </summary>
    virtual size_t run(const configuration_set &configs,
                       const on_result_callback& result_callback);

    /// <summary>
    /// Overrides benchmark run method.
    /// </summary>
    virtual result run(const configuration &cfg);

    ///
    /// \brief can_run Check whether the benchmark can run on the given
    /// environment/device combination.
    /// \param env The environment (OpenCL platform) to check.
    /// \param device The device to check.
    /// \return true if the benchmark can run, false otherwise.
    ///
    virtual bool can_run(trrojan::environment env, trrojan::device device) const noexcept;

private:

    /// <summary>
    /// Add a factor that is relevant during kernel run-time.
    /// </summary>
    /// <param name="name">Name of the factor</param>
    /// <param name="value">Value of the factor</param>
    void add_kernel_run_factor(std::string name, variant value);

    /// <summary>
    /// Add a factor that is relevant during kernel build time.
    /// </summary>
    /// <param name="name">Name of the factor</param>
    /// <param name="value">Value of the factor</param>
    void add_kernel_build_factor(std::string name, variant value);

    /// <summary>
    /// Initialize shuffled ray ids and set up kernel buffer.
    /// </summary>
    /// <param name="env">OpenCL environment pointer.</param>
    /// <param name="viewport">Viewport size.</param>
    void set_shuffled_ray_ids(const environment::pointer env,
                              const std::array<unsigned int, 2> viewport);

    /// <summary>
    /// Set-up basic raycaster configuration.
    /// </summary>
    /// <remarks>Normally, this method only needs to be invoked once
    /// before the first run.</remarks>
    /// <param name="cfg">The currently active configuration.</param>
    void setup_raycaster(const configuration &cfg);

    /// <summary>
    /// Setup the volume data set with the given configuration <paramref name="cfg" />.
    /// </summary>
    /// <param name="cfg">Reference to the configuration that is to be set-up.</param>
    /// <param name="changed">Set of factor names that have changed since the last run</param>
    void setup_volume_data(const configuration &cfg,
                           const std::unordered_set<std::string> changed);

    /// <summary>
    /// Load volume data based on information from the given .dat file.
    /// </summary>
    /// <param name="dat_file">Name of the .dat-file that contains the information
    /// on the volume data.</param>
    const std::vector<char> &load_volume_data(const std::string dat_file);

    /// <summary>
    /// Read a transfer function from the file with the given name.
    /// A transfer function has exactly 256 RGBA floating point values.
    /// We try to find those in the given input file by parsing for whitespace separated
    /// floating point values.
    /// However, if there are too many values in the file, we truncate respectively
    /// fill with zeros.
    /// If no transfer function file is specified (i.e. the factor string is "fallback"),
    /// we use a default linear function with range [0;1] as fallback.
    /// </summary>
    /// <remarks>The read will fail on the first sign that is neither a numeric value,
    /// nor a whitespace</remarks>
    /// <param name="file_name">The name (and path) of the file that contains the
    /// transfer function in form of numeric values.</param>
    /// <param name="env">Pointer to environment.</param>
    void load_transfer_function(const std::string file_name, environment::pointer env);

    /// <summary>
    /// Selects the correct source scalar type <paramref name="s" />
    /// and continues with dispatching the target type.
    /// </summary>
    template<trrojan::opencl::scalar_type S,
             trrojan::opencl::scalar_type... Ss,
             class... P>
    inline void dispatch(
            trrojan::opencl::scalar_type_list_t<S, Ss...>,
            const trrojan::opencl::scalar_type s,
            const trrojan::opencl::scalar_type t,
            P&&... params)
    {
        if (S == s)
        {
            // source type found: now resolve the target type
            //std::cout << "scalar type " << (int) S << " selected." << std::endl;
            this->dispatch<S>(scalar_type_list(), t, std::forward<P>(params)...);
        }
        else
        {
            // try the next source type in the list
            this->dispatch(trrojan::opencl::scalar_type_list_t<Ss...>(),
                           s, t, std::forward<P>(params)...);
        }
    }

    /// <summary>
    /// Recursion stop: no source type in the list matched.
    /// </summary>
    template<class... P>
    inline void dispatch(trrojan::opencl::scalar_type_list_t<>,
                         const trrojan::opencl::scalar_type s,
                         const trrojan::opencl::scalar_type t,
                         P&&... params)
    {
        throw std::runtime_error("Resolution failed.");
    }

    /// <summary>
    /// Selects the specified target scalar type <paramref name="t" />
    /// and continues with the conversion.
    /// </summary>
    template<trrojan::opencl::scalar_type S,
             trrojan::opencl::scalar_type T,
             trrojan::opencl::scalar_type... Ts,
             class... P>
    inline void dispatch(
            trrojan::opencl::scalar_type_list_t<T, Ts...>,
            const trrojan::opencl::scalar_type t,
            P&&... params)
    {
        if (T == t)
        {
            // both types resolved: perform the actual conversion
            typedef typename scalar_type_traits<S>::type src_type;
            typedef typename scalar_type_traits<T>::type dst_type;
            this->convert_data_precision<src_type, dst_type>(
                    std::forward<P>(params)...);
        }
        else
        {
            // try the next target type in the list
            this->dispatch<S>(trrojan::opencl::scalar_type_list_t<Ts...>(),
                              t, std::forward<P>(params)...);
        }
    }

    /// <summary>
    /// Recursion stop: no target type in the list matched.
    /// </summary>
    template<trrojan::opencl::scalar_type S, class... P>
    inline void dispatch(trrojan::opencl::scalar_type_list_t<>,
                         const trrojan::opencl::scalar_type t,
                         P&&... params)
    {
        throw std::runtime_error("Resolution failed.");
    }

    /// <summary>
    /// Scale the volume <paramref name="data" /> by <paramref name="factor" /> in each
    /// dimension (nearest-neighbor resampling).
    /// </summary>
    /// <param name="data">The volume data.</param>
    /// <param name="volume_res">The volume data set resolution; updated in place
    /// to the scaled resolution.</param>
    /// <param name="factor">The scaling factor.</param>
    template<class T>
    void scale_data(std::vector<T> &data,
                    std::array<unsigned, 3> &volume_res,
                    const double factor)
    {
        size_t voxel_cnt = 0;
        std::array<unsigned, 3> native_res;
        // remember the native resolution and scale each dimension
        for (size_t i = 0; i < volume_res.size(); ++i)
        {
            native_res[i] = volume_res[i];
            volume_res[i] *= factor;
        }
        voxel_cnt = volume_res[0] * volume_res[1] * volume_res[2];
        std::vector<T> data_scaled(voxel_cnt, 0);
        // nearest-neighbor lookup into the native-resolution data
#pragma omp parallel for
        for (int z = 0; z < (int)volume_res[2]; ++z)
        {
            for (int y = 0; y < (int)volume_res[1]; ++y)
            {
                for (int x = 0; x < (int)volume_res[0]; ++x)
                {
                    size_t data_id = floor(x/factor)
                            + native_res[0]*floor(y/factor)
                            + native_res[0]*native_res[1]*floor(z/factor);
                    data_scaled.at(x + volume_res[0]*y + volume_res[0]*volume_res[1]*z) =
                            data.at(data_id);
                }
            }
        }
        data = data_scaled;
    }

    /// <summary>
    /// Convert scalar raw volume data from a given input type to a given output type
    /// and create an OpenCL memory object with the resulting data.
    /// </summary>
    /// <param name="volume_data">Reference to the scalar input data</param>
    /// <param name="use_buffer">Switch parameter to indicate whether a linear buffer
    /// or a 3d image buffer is to be created in OpenCL.</param>
    /// <param name="cl_env">Pointer to the OpenCL environment.</param>
    /// <param name="scaling_factor">Per-dimension volume scaling factor,
    /// defaults to 1.0 (no scaling).</param>
    /// <tParam name="From">Data precision of the input scalar volume data.</tParam>
    /// <tParam name="To">Data precision of the data from which the OpenCL memory
    /// objects are to be created</tParam>
    template<class From, class To>
    void convert_data_precision(const std::vector<char> &volume_data,
                                const bool use_buffer,
                                environment::pointer cl_env,
                                const double scaling_factor = 1.0)
    {
        // reinterpret raw data (char) to input format
        auto s = reinterpret_cast<const From *>(volume_data.data());
        auto e = reinterpret_cast<const From *>(volume_data.data() + volume_data.size());
        // convert input vector to the desired output precision
        std::vector<To> converted_data(s, e);
        // manual downcast if necessary
        if (sizeof(To) < sizeof(From))
        {
            // rescale values into the smaller target range
            double div = pow(2.0, (sizeof(From) - sizeof(To))*8);
#pragma omp parallel for
            for (long long int i = 0; i < (long long int)converted_data.size(); ++i)
            {
                converted_data.at(i) = s[i] / div;
            }
        }
        _volume_res = _dr.properties().volume_res;
        if (scaling_factor != 1)
        {
            scale_data(converted_data, _volume_res, scaling_factor);
            std::cout << "Volume data scaled by factor " << scaling_factor << std::endl;
        }
        try
        {
            if (use_buffer)
            {
                // linear buffer representation
                _volume_mem = cl::Buffer(cl_env->get_properties().context,
                                         CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                         converted_data.size()*sizeof(To),
                                         converted_data.data());
            }
            else // texture
            {
                // 3d image (texture) representation; channel type depends on
                // the target precision
                cl::ImageFormat format;
                format.image_channel_order = CL_R;
                switch (sizeof(To))
                {
                case 1:
                    format.image_channel_data_type = CL_UNORM_INT8; break;
                case 2:
                    format.image_channel_data_type = CL_UNORM_INT16; break;
                case 4:
                    format.image_channel_data_type = CL_FLOAT; break;
                case 8:
                    throw std::invalid_argument(
                                "Double precision is not supported for OpenCL image formats.");
                    break;
                default:
                    throw std::invalid_argument("Invalid volume data format."); break;
                }
                _volume_mem = cl::Image3D(cl_env->get_properties().context,
                                          CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                          format,
                                          _volume_res[0],
                                          _volume_res[1],
                                          _volume_res[2],
                                          0, 0,
                                          converted_data.data());
            }
        }
        catch (cl::Error err)
        {
            throw std::runtime_error( "ERROR: " + std::string(err.what()) + "("
                                      + util::get_cl_error_str(err.err()) + ")");
        }
    }

    /// <summary>
    /// Parse a named variant for a scalar type.
    /// </summary>
    /// <param name="s">Reference to the named variant that is to be parsed.</param>
    /// <returns>The scalar type</returns>
    static inline scalar_type parse_scalar_type(const trrojan::named_variant& s)
    {
        typedef enum_parse_helper<scalar_type, scalar_type_traits, scalar_type_list_t> parser;
        auto value = s.value().as<std::string>();
        return parser::parse(scalar_type_list(), value);
    }

    /// <summary>
    /// Create an OpenCL memory object from the raw volume data, converting
    /// from <paramref name="data_precision" /> to
    /// <paramref name="sample_precision" />.
    /// </summary>
    /// <param name="data_precision">Scalar type of the raw input data.</param>
    /// <param name="sample_precision">Scalar type the data is converted to.</param>
    /// <param name="raw_data">The raw volume data.</param>
    /// <param name="use_buffer">true to create a linear buffer, false for a
    /// 3d image object.</param>
    /// <param name="env">Pointer to the OpenCL environment.</param>
    /// <param name="scaling_factor">Per-dimension volume scaling factor.</param>
    void create_vol_mem(const scalar_type data_precision,
                        const scalar_type sample_precision,
                        const std::vector<char> &raw_data,
                        const bool use_buffer,
                        environment::pointer env,
                        const double scaling_factor = 1.0);

    /// <summary>
    /// Compose and generate the OpenCL kernel source based on the given configuration.
    /// </summary>
    void compose_kernel(const configuration &cfg);

    /// <summary>
    /// Compile the OpenCL kernel source for the device referenced by <paramref name="dev" />
    /// on the platform referenced by <paramref name="env" />.
    /// </summary>
    /// <param name="env">Smart pointer to a valid OpenCL environment.</param>
    /// <param name="dev">Smart pointer to a valid OpenCL device on the platform
    /// <paramref name="env" />.</param>
    /// <param name="kernel_source">The kernel source code to compile.</param>
    /// <param name="precision_div">Precision based divisor for kernel argument.</param>
    /// <param name="build_flags">Compiler build flags.</param>
    /// <throws>Runtime error if program creation, kernel build or initialization
    /// fail.</throws>
    void build_kernel(environment::pointer env,
                      device::pointer dev,
                      const std::string &kernel_source,
                      const float precision_div = 255.0f,
                      const std::string &build_flags = "");

    /// <summary>
    /// Update the camera configuration and set kernel argument.
    /// No OpenCL error catching is performed.
    /// </summary>
    void update_camera(const trrojan::configuration &cfg);

    /// <summary>
    /// Set all constant kernel arguments such as the OpenCL memory objects.
    /// </summary>
    void set_kernel_args(const float precision_div);

    ///
    /// \brief update_initial_kernel_args Set all kernel arguments based on the
    /// given configuration (initial setup before the first run).
    /// \param cfg The current configuration.
    ///
    void update_initial_kernel_args(const trrojan::configuration &cfg);

    /// <summary>
    /// Update arguments that are relevant for kernel execution during runtime.
    /// </summary>
    /// <param name="cfg">The current configuration.</param>
    /// <param name="changed">List of all configuration parameter names that have changed
    /// since the last run.</param>
    void update_kernel_args(const configuration &cfg,
                            const std::unordered_set<std::string> changed);

    /// <summary>
    /// Read all kernel snippets that can be found in <paramref name="path" />,
    /// i.e. all files with the ".cl" extension.
    /// </summary>
    /// <param name="path">The directory path containing the kernel snippets.</param>
    void read_kernel_snippets(const std::string path);

    /// <summary>
    /// Replace the first keyword <paramref name="keyword" /> string that can be found in
    /// <paramref name="text" /> with the <paramref name="insert" /> string. Keywords
    /// in the <paramref name="text" /> have to be surrounded by <paramref name="prefix" />
    /// and <paramref name="suffix" />, the defaults are /***keyword***/.
    /// </summary>
    /// <param name="keyword">The keyword string that is to be replaced</param>
    /// <param name="insert">The string that is to be inserted in place of keyword</param>
    /// <param name="text">Reference to the string that is to be manipulated.</param>
    /// <param name="prefix">Keyword defining prefix, default is "/***".</param>
    /// <param name="suffix">Keyword defining suffix, default is "***/".</param>
    void replace_keyword(const std::string keyword,
                         const std::string insert,
                         std::string &text,
                         const std::string prefix = "/***",
                         const std::string suffix = "***/");

    /// <summary>
    /// Replace a keyword in <paramref name="kernel_source" /> with the snippet contained
    /// in the _kernel_snippets member map, accessed by the <paramref name="keyword" />.
    /// </summary>
    /// <param name="keyword">The keyword used as the key to access the snippet in
    /// _kernel_snippets member variable.</param>
    /// <param name="kernel_source">Reference to the kernel source that is to be
    /// manipulated.</param>
    void replace_kernel_snippet(const std::string keyword, std::string &kernel_source);

    /// <summary>
    /// Create a right handed, transposed view matrix from <paramref name="roll" />,
    /// <paramref name="pitch" />, <paramref name="yaw" /> rotations as well as the
    /// camera distance (<paramref name="zoom" />).
    /// </summary>
    /// <param name="yaw">Rotation around the y-axis in radians.</param>
    /// <param name="pitch">Rotation around the x-axis in radians.</param>
    /// <param name="roll">Rotation around the z-axis in radians.</param>
    /// <param name="zoom">Distance of the camera from the origin
    /// (where the volume is centered)</param>
    /// <remarks>Right handed coordinate system.</remarks>
    /// <remarks>Assuming radians as input angles.</remarks>
    /// <returns>The updated RH view matrix.</returns>
    cl_float16 create_view_mat(double roll, double pitch, double yaw, double zoom);

    /// <summary>
    /// Interpret an OpenCL error <paramref name="error" /> and throw.
    /// TODO: log to file.
    /// </summary>
    /// <param name="error">The OpenCL error objects.</param>
    /// <throws>Runtime error.</throws>
    void log_cl_error(cl::Error error);

    /// <summary>
    /// Calculate the volume model scaling (cf. _model_scale member).
    /// </summary>
    void calcScaling();

    ///
    /// \brief set_tff_prefix_sum Upload the transfer function prefix sum.
    /// \param tff_prefix_sum The prefix sum values.
    /// \param env Pointer to the OpenCL environment.
    ///
    void set_tff_prefix_sum(std::vector<unsigned int> &tff_prefix_sum,
                            environment::pointer env);

    ///
    /// \brief set_mem_objects_brick_gen Set the memory objects for brick generation.
    ///
    void set_mem_objects_brick_gen();

    ///
    /// \brief generate_bricks Generate the low resolution brick volume.
    /// \param env Pointer to the OpenCL environment.
    ///
    void generate_bricks(environment::pointer env);

    /// <summary>
    /// Member to hold 'passive' configuration factors (i.e. they have no influence on tests),
    /// that are read from the volume data set ".dat" file.
    /// </summary>
    trrojan::configuration _passive_cfg;

    /// <summary>
    /// Vector containing the names of all factors that are relevant at build time
    /// of the OpenCL kernel.
    /// </summary>
    std::vector<std::string> _kernel_build_factors;

    /// <summary>
    /// Vector containing the names of all factors that are relevant at run-time
    /// of the OpenCL kernel.
    /// </summary>
    std::vector<std::string> _kernel_run_factors;

    /// <summary>
    /// Dat raw reader object;
    /// </summary>
    dat_raw_reader _dr;

    /// <summary>
    /// Unordered map to store OpenCL kernel snippets.
    /// </summary>
    std::unordered_map<std::string, std::string> _kernel_snippets;

    /// <summary>
    /// Volume data as OpenCL memory object.
    /// </summary>
    /// <remarks>Can be represented either as a linear buffer or as a 3d image
    /// object.</remarks>
    cl::Memory _volume_mem;

    /// <summary>
    /// Low resolution representation of volume data containing min and max values
    /// for each brick consisting of resolution³/64³ voxels.
    /// </summary>
    cl::Image3D _brick_mem;

    /// <summary>
    /// The rendering output image.
    /// </summary>
    cl::Image2D _output_mem;

    /// <summary>
    /// Transfer function memory object as a 1d image representation.
    /// </summary>
    cl::Image1D _tff_mem;

    /// <summary>
    /// Transfer function prefix sum memory object as a 1d image representation.
    /// </summary>
    cl::Image1D _tff_prefix_mem;

    /// <summary>
    /// OpenCL buffer object for shuffled ray IDs.
    /// </summary>
    cl::Buffer _ray_ids;

    /// <summary>
    /// Sampler for images in OpenCL kernel.
    /// </summary>
    cl::Sampler _sampler;

    /// <summary>
    /// The current OpenCL kernel for volume raycasting.
    /// </summary>
    cl::Kernel _kernel;

    /// <summary>
    /// The OpenCL kernel for generating low resolution brick volume.
    /// </summary>
    cl::Kernel _gen_bricks_kernel;

    /// <summary>
    /// Complete source of the current OpenCL kernel.
    /// </summary>
    std::string _kernel_source;

    /// <summary>
    /// Vector for storing the rendered output data (2d image).
    /// </summary>
    std::vector<float> _output_data;

    /// <summary>
    /// The volume data resolution <b>after</b> scaling.
    /// </summary>
    std::array<unsigned, 3> _volume_res;

    /// <summary>
    /// The camera.
    /// </summary>
    trrojan::perspective_camera _camera;

    /// <summary>
    /// Volume model scaling.
    /// </summary>
    glm::vec3 _model_scale;

    /// <summary>
    /// Data precision division factor.
    /// </summary>
    float _precision_div;
};
}
}
|
dpado.202001141642.batch_number_limit.h | //
// Created by Zhen Peng on 1/6/20.
//
#ifndef PADO_DPADO_H
#define PADO_DPADO_H
#include <vector>
//#include <unordered_map>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
//#include <xmmintrin.h>
#include <immintrin.h>
#include <bitset>
#include <math.h>
#include <fstream>
#include <omp.h>
#include "globals.h"
#include "dglobals.h"
#include "dgraph.h"
namespace PADO {
template <VertexID BATCH_SIZE = 1024>
class DistBVCPLL {
private:
static const VertexID BITPARALLEL_SIZE = 50;
const inti THRESHOLD_PARALLEL = 80;
// Structure for the type of label
struct IndexType {
// struct Batch {
// VertexID batch_id; // Batch ID
// VertexID start_index; // Index to the array distances where the batch starts
// VertexID size; // Number of distances element in this batch
//
// Batch() = default;
// Batch(VertexID batch_id_, VertexID start_index_, VertexID size_):
// batch_id(batch_id_), start_index(start_index_), size(size_)
// { }
// };
struct DistanceIndexType {
VertexID start_index; // Index to the array vertices where the same-distance vertices start
VertexID size; // Number of the same-distance vertices
UnweightedDist dist; // The real distance
DistanceIndexType() = default;
DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_):
start_index(start_index_), size(size_), dist(dist_)
{ }
};
// Bit-parallel Labels
UnweightedDist bp_dist[BITPARALLEL_SIZE];
uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}
// std::vector<Batch> batches; // Batch info
std::vector<DistanceIndexType> distances; // Distance info
std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID
size_t size() const
{
return sizeof(bp_dist) +
sizeof(bp_sets) +
// batches.size() * sizeof(Batch) +
distances.size() * sizeof(DistanceIndexType) +
vertices.size() * sizeof(VertexID);
}
}; //__attribute__((aligned(64)));
struct ShortIndex {
// I use BATCH_SIZE + 1 bit for indicator bit array.
// The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already.
// In this way, it helps update_label_indices() and can be reset along with other indicator elements.
// std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already
// If the Batch structure is not used, the indicator could just be BATCH_SIZE long.
std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0);
// std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0);
// Use a queue to store candidates
std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE);
VertexID end_candidates_que = 0;
std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0);
void indicator_reset()
{
std::fill(indicator.begin(), indicator.end(), 0);
}
}; //__attribute__((aligned(64)));
// Type of Bit-Parallel Label
struct BPLabelType {
UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 };
uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0}
};
// Type of Label Message Unit, for initializing distance table
struct LabelTableUnit {
VertexID root_id;
VertexID label_global_id;
UnweightedDist dist;
LabelTableUnit() = default;
LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
root_id(r), label_global_id(l), dist(d) {}
};
// Type of BitParallel Label Message Unit for initializing bit-parallel labels
struct MsgBPLabel {
VertexID r_root_id;
UnweightedDist bp_dist[BITPARALLEL_SIZE];
uint64_t bp_sets[BITPARALLEL_SIZE][2];
MsgBPLabel() = default;
MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
: r_root_id(r)
{
memcpy(bp_dist, dist, sizeof(bp_dist));
memcpy(bp_sets, sets, sizeof(bp_sets));
}
};
VertexID num_v = 0;
VertexID num_masters = 0;
// VertexID BATCH_SIZE = 0;
int host_id = 0;
int num_hosts = 0;
MPI_Datatype V_ID_Type;
std::vector<IndexType> L;
inline void bit_parallel_push_labels(
const DistGraph &G,
VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q,
VertexID &size_tmp_q,
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q,
std::vector<UnweightedDist> &dists,
UnweightedDist iter);
inline void bit_parallel_labeling(
const DistGraph &G,
std::vector<uint8_t> &used_bp_roots);
// inline void bit_parallel_push_labels(
// const DistGraph &G,
// VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// UnweightedDist iter);
// inline void bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots);
inline void batch_process(
const DistGraph &G,
// const VertexID b_id,
const VertexID roots_start,
const VertexID roots_size,
const std::vector<uint8_t> &used_bp_roots,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<uint8_t> &is_active,
// std::vector<bool> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated);
// std::vector<bool> &once_candidated);
inline VertexID initialization(
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
// VertexID b_id,
VertexID roots_start,
VertexID roots_size,
// std::vector<VertexID> &roots_master_local,
const std::vector<uint8_t> &used_bp_roots);
// inline void push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
// Schedules (parallel) label pushing for the active vertices in
// active_queue[global_start, global_start + ...); candidates produced by the
// pushes are collected into got_candidates_queue / once_candidated_queue.
// NOTE(review): exact meaning of global_size vs. local_size is defined at the
// definition outside this view — confirm there.
inline void schedule_label_pushing_para(
const DistGraph &G,
const VertexID roots_start,
const std::vector<uint8_t> &used_bp_roots,
const std::vector<VertexID> &active_queue,
const VertexID global_start,
const VertexID global_size,
const VertexID local_size,
// const VertexID start_active_queue,
// const VertexID size_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
const std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
std::vector<uint8_t> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const UnweightedDist iter);
// Sequential label push: relaxes v_head_global's local out-edges in the edge
// range [start_index, bound_index), using labels_buffer as the labels of the
// head, and appends newly-found candidates to got_candidates_queue.
inline void local_push_labels_seq(
VertexID v_head_global,
EdgeID start_index,
EdgeID bound_index,
VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
// Parallel label push: like local_push_labels_seq, but writes candidates into
// the caller-provided per-thread slices (tmp_* arrays starting at
// offset_tmp_queue, with per-call size counters) for later collection into
// the shared queues.
inline void local_push_labels_para(
const VertexID v_head_global,
const EdgeID start_index,
const EdgeID bound_index,
const VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
std::vector<VertexID> &tmp_got_candidates_queue,
VertexID &size_tmp_got_candidates_queue,
const VertexID offset_tmp_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
std::vector<VertexID> &tmp_once_candidated_queue,
VertexID &size_tmp_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
// inline void local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
// Distance query used for pruning: decides, from existing labels and
// dist_table, whether the candidate label cand_root_id is needed for v_id at
// distance iter. NOTE(review): the polarity of the returned bool (needed vs.
// prunable) is defined outside this view — confirm at the definition.
inline bool distance_query(
VertexID cand_root_id,
VertexID v_id,
VertexID roots_start,
// const std::vector<IndexType> &L,
const std::vector< std::vector<UnweightedDist> > &dist_table,
UnweightedDist iter);
// Sequential label insertion: adds label (cand_root_id) to local vertex
// v_id_local and records the (root, vertex) pair into buffer_send so other
// hosts can update their distance tables.
inline void insert_label_only_seq(
VertexID cand_root_id,
// VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::pair<VertexID, VertexID> > &buffer_send);
// UnweightedDist iter);
// Parallel label insertion: like insert_label_only_seq, but records the pair
// into a per-thread slice of tmp_buffer_send starting at
// offset_tmp_buffer_send (size_tmp_buffer_send is the per-call counter).
inline void insert_label_only_para(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send)
std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
EdgeID &size_tmp_buffer_send,
const EdgeID offset_tmp_buffer_send);
// Updates v_id's label-index bookkeeping after inserted_count labels were
// added at distance iter in the current batch.
inline void update_label_indices(
const VertexID v_id,
const VertexID inserted_count,
// std::vector<IndexType> &L,
// std::vector<ShortIndex> &short_index,
// VertexID b_id,
const UnweightedDist iter);
// End-of-batch cleanup: resets the touched entries of dist_table (tracked in
// recved_dist_table) and clears bp_labels_table for the next batch.
inline void reset_at_end(
// const DistGraph &G,
// VertexID roots_start,
// const std::vector<VertexID> &roots_master_local,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table);
// template <typename E_T, typename F>
// inline void every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun);
// Broadcast helper: host `root` sends its buffer_send to every host; each
// host receives the content into buffer_recv.
template <typename E_T>
inline void one_host_bcasts_buffer_to_buffer(
int root,
std::vector<E_T> &buffer_send,
std::vector<E_T> &buffer_recv);
// // Function: get the destination host id which is i hop from this host.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_me_host_id(int hop) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// return (host_id + hop + num_hosts) % num_hosts;
// }
// // Function: get the destination host id which is i hop from the root.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_root_host_id(int hop, int root) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// assert(root >= 0 && root < num_hosts);
// return (root + hop + num_hosts) % num_hosts;
// }
size_t get_index_size()
{
size_t bytes = 0;
for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
bytes += L[v_i].size();
}
return bytes;
}
// Test only
// uint64_t normal_hit_count = 0;
// uint64_t bp_hit_count = 0;
// uint64_t total_check_count = 0;
// uint64_t normal_check_count = 0;
// uint64_t total_candidates_num = 0;
// uint64_t set_candidates_num = 0;
// double initializing_time = 0;
// double candidating_time = 0;
// double adding_time = 0;
// double distance_query_time = 0;
// double init_index_time = 0;
// double init_dist_matrix_time = 0;
// double init_start_reset_time = 0;
// double init_indicators_time = 0;
//L2CacheMissRate cache_miss;
// double message_time = 0;
// double bp_labeling_time = 0;
// double initializing_time = 0;
// double scatter_time = 0;
// double gather_time = 0;
// double clearup_time = 0;
// TotalInstructsExe candidating_ins_count;
// TotalInstructsExe adding_ins_count;
// TotalInstructsExe bp_labeling_ins_count;
// TotalInstructsExe bp_checking_ins_count;
// TotalInstructsExe dist_query_ins_count;
// End test
public:
// std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0);
// Default constructor: leaves the index empty (nothing is built).
DistBVCPLL() = default;
// Builds the complete distributed label index for G (definition below).
explicit DistBVCPLL(
const DistGraph &G);
// UnweightedDist dist_distance_query_pair(
// VertexID a_global,
// VertexID b_global,
// const DistGraph &G);
}; // class DistBVCPLL
// Constructor: builds the whole distributed batched vertex-centric PLL label
// index for G. Phase 1 computes bit-parallel labels (bit_parallel_labeling);
// phase 2 runs batch_process() over batches of BATCH_SIZE roots; finally
// label-count and timing statistics are reduced over MPI and printed by
// host 0.
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
const DistGraph &G)
{
// Cache graph/topology parameters as members.
num_v = G.num_v;
assert(num_v >= BATCH_SIZE);
num_masters = G.num_masters;
host_id = G.host_id;
// {
// if (1 == host_id) {
// volatile int i = 0;
// while (i == 0) {
// sleep(5);
// }
// }
// }
num_hosts = G.num_hosts;
V_ID_Type = G.V_ID_Type;
// L.resize(num_v);
// Labels are stored only for this host's master vertices.
L.resize(num_masters);
VertexID remainer = num_v % BATCH_SIZE;
VertexID b_i_bound = num_v / BATCH_SIZE;
std::vector<uint8_t> used_bp_roots(num_v, 0);
//cache_miss.measure_start();
double time_labeling = -WallTimer::get_time_mark();
// Phase 1: bit-parallel labeling; marks consumed roots in used_bp_roots.
// bp_labeling_time -= WallTimer::get_time_mark();
bit_parallel_labeling(G,
used_bp_roots);
// bp_labeling_time += WallTimer::get_time_mark();
// {//test
////#ifdef DEBUG_MESSAGES_ON
// if (0 == host_id) {
// printf("host_id: %u bp_labeling_finished.\n", host_id);
// }
////#endif
// }
// Working structures shared by all batches (allocated once, reset per batch).
std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue.
VertexID end_active_queue = 0;
std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
// std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue.
VertexID end_got_candidates_queue = 0;
std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
// std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
std::vector<ShortIndex> short_index(num_masters);
// dist_table[r][v]: distance from batch root r to global vertex v.
std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));
std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue.
// Used mainly for resetting short_index[v].indicator.
VertexID end_once_candidated_queue = 0;
std::vector<uint8_t> once_candidated(num_masters, false);
// std::vector<bool> once_candidated(num_masters, false);
std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table.
std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels
//printf("b_i_bound: %u\n", b_i_bound);//test
// Phase 2: process the roots in batches of BATCH_SIZE.
for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
{// Batch number limit
// NOTE(review): debug cap — stops after 10 batches and also skips the
// remainder batch (remainer = 0), so the built index is partial.
// Confirm this is intentional before any production build.
if (10 == b_i) {
remainer = 0;
break;
}
}
// {
////#ifdef DEBUG_MESSAGES_ON
// if (0 == host_id) {
// printf("b_i: %u\n", b_i);//test
// }
////#endif
// }
batch_process(
G,
// b_i,
b_i * BATCH_SIZE,
BATCH_SIZE,
// L,
used_bp_roots,
active_queue,
end_active_queue,
got_candidates_queue,
end_got_candidates_queue,
short_index,
dist_table,
recved_dist_table,
bp_labels_table,
got_candidates,
is_active,
once_candidated_queue,
end_once_candidated_queue,
once_candidated);
// exit(EXIT_SUCCESS); //test
}
// Handle the final partial batch, if any.
if (remainer != 0) {
// {
////#ifdef DEBUG_MESSAGES_ON
// if (0 == host_id) {
// printf("b_i: %u\n", b_i_bound);//test
// }
////#endif
// }
batch_process(
G,
// b_i_bound,
b_i_bound * BATCH_SIZE,
remainer,
// L,
used_bp_roots,
active_queue,
end_active_queue,
got_candidates_queue,
end_got_candidates_queue,
short_index,
dist_table,
recved_dist_table,
bp_labels_table,
got_candidates,
is_active,
once_candidated_queue,
end_once_candidated_queue,
once_candidated);
}
time_labeling += WallTimer::get_time_mark();
//cache_miss.measure_stop();
// Test
// Statistics reporting: label counts and timing, reduced over MPI.
setlocale(LC_NUMERIC, "");
if (0 == host_id) {
printf("BATCH_SIZE: %u\n", BATCH_SIZE);
printf("BP_Size: %u\n", BITPARALLEL_SIZE);
}
{// Total Number of Labels
EdgeID local_num_labels = 0;
for (VertexID v_global = 0; v_global < num_v; ++v_global) {
if (G.get_master_host_id(v_global) != host_id) {
continue;
}
local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
}
EdgeID global_num_labels;
MPI_Allreduce(&local_num_labels,
&global_num_labels,
1,
MPI_Instance::get_mpi_datatype<EdgeID>(),
MPI_SUM,
MPI_COMM_WORLD);
// printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels);
MPI_Barrier(MPI_COMM_WORLD);
if (0 == host_id) {
printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
}
// VertexID local_num_batches = 0;
// VertexID local_num_distances = 0;
//// double local_avg_distances_per_batches = 0;
// for (VertexID v_global = 0; v_global < num_v; ++v_global) {
// if (G.get_master_host_id(v_global) != host_id) {
// continue;
// }
// VertexID v_local = G.get_local_vertex_id(v_global);
// local_num_batches += L[v_local].batches.size();
// local_num_distances += L[v_local].distances.size();
//// double avg_d_p_b = 0;
//// for (VertexID i_b = 0; i_b < L[v_local].batches.size(); ++i_b) {
//// avg_d_p_b += L[v_local].batches[i_b].size;
//// }
//// avg_d_p_b /= L[v_local].batches.size();
//// local_avg_distances_per_batches += avg_d_p_b;
// }
//// local_avg_distances_per_batches /= num_masters;
//// double local_avg_batches = local_num_batches * 1.0 / num_masters;
//// double local_avg_distances = local_num_distances * 1.0 / num_masters;
// uint64_t global_num_batches = 0;
// uint64_t global_num_distances = 0;
// MPI_Allreduce(
// &local_num_batches,
// &global_num_batches,
// 1,
// MPI_UINT64_T,
// MPI_SUM,
// MPI_COMM_WORLD);
//// global_avg_batches /= num_hosts;
// MPI_Allreduce(
// &local_num_distances,
// &global_num_distances,
// 1,
// MPI_UINT64_T,
// MPI_SUM,
// MPI_COMM_WORLD);
//// global_avg_distances /= num_hosts;
// double global_avg_d_p_b = global_num_distances * 1.0 / global_num_batches;
// double global_avg_l_p_d = global_num_labels * 1.0 / global_num_distances;
// double global_avg_batches = global_num_batches / num_v;
// double global_avg_distances = global_num_distances / num_v;
//// MPI_Allreduce(
//// &local_avg_distances_per_batches,
//// &global_avg_d_p_b,
//// 1,
//// MPI_DOUBLE,
//// MPI_SUM,
//// MPI_COMM_WORLD);
//// global_avg_d_p_b /= num_hosts;
// MPI_Barrier(MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("global_avg_batches: %f "
// "global_avg_distances: %f "
// "global_avg_distances_per_batch: %f "
// "global_avg_labels_per_distance: %f\n",
// global_avg_batches,
// global_avg_distances,
// global_avg_d_p_b,
// global_avg_l_p_d);
// }
}
// printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100);
// printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100);
// printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100);
// printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100);
// printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100);
// printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100);
// printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100);
// printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100);
// printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100);
// uint64_t total_check_count = bp_hit_count + normal_check_count;
// printf("total_check_count: %'llu\n", total_check_count);
// printf("bp_hit_count: %'llu %.2f%%\n",
// bp_hit_count,
// bp_hit_count * 100.0 / total_check_count);
// printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count);
// printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n",
// total_candidates_num,
// set_candidates_num,
// set_candidates_num * 100.0 / total_candidates_num);
// printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n",
// normal_hit_count,
// normal_hit_count * 100.0 / total_check_count,
// normal_hit_count * 100.0 / (total_check_count - bp_hit_count));
//cache_miss.print();
// printf("Candidating: "); candidating_ins_count.print();
// printf("Adding: "); adding_ins_count.print();
// printf("BP_Labeling: "); bp_labeling_ins_count.print();
// printf("BP_Checking: "); bp_checking_ins_count.print();
// printf("distance_query: "); dist_query_ins_count.print();
// printf("num_hosts: %u host_id: %u\n"
// "Local_labeling_time: %.2f seconds\n"
// "bp_labeling_time: %.2f %.2f%%\n"
// "initializing_time: %.2f %.2f%%\n"
// "scatter_time: %.2f %.2f%%\n"
// "gather_time: %.2f %.2f%%\n"
// "clearup_time: %.2f %.2f%%\n"
// "message_time: %.2f %.2f%%\n",
// num_hosts, host_id,
// time_labeling,
// bp_labeling_time, 100.0 * bp_labeling_time / time_labeling,
// initializing_time, 100.0 * initializing_time / time_labeling,
// scatter_time, 100.0 * scatter_time / time_labeling,
// gather_time, 100.0 * gather_time / time_labeling,
// clearup_time, 100.0 * clearup_time / time_labeling,
// message_time, 100.0 * message_time / time_labeling);
// Global labeling time is the maximum over all hosts.
double global_time_labeling;
MPI_Allreduce(&time_labeling,
&global_time_labeling,
1,
MPI_DOUBLE,
MPI_MAX,
MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
if (0 == host_id) {
printf("num_hosts: %d "
"Global_labeling_time: %.2f seconds\n",
num_hosts,
global_time_labeling);
}
// End test
}
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling(
// const DistGraph &G,
// std::vector<uint8_t> &used_bp_roots)
//{
//// VertexID num_v = G.num_v;
// EdgeID num_e = G.num_e;
//
// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_v); // active queue
// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// while (r < num_v && used_bp_roots[r]) {
// ++r;
// }
// if (r == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// used_bp_roots[r] = true;
//
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// que[que_h++] = r;
// tmp_d[r] = 0;
// que_t1 = que_h;
//
// int ns = 0; // number of selected neighbor, default 64
// // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward
// // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF.
//// VertexID i_bound = G.vertices[r] - 1;
//// VertexID i_start = i_bound + G.out_degrees[r];
//// for (VertexID i = i_start; i > i_bound; --i) {
// //int i_bound = G.vertices[r];
// //int i_start = i_bound + G.out_degrees[r] - 1;
// //for (int i = i_start; i >= i_bound; --i) {
// VertexID d_i_bound = G.local_out_degrees[r];
// EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1;
// for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) {
// EdgeID i = i_start - d_i;
// VertexID v = G.out_edges[i];
// if (!used_bp_roots[v]) {
// used_bp_roots[v] = true;
// // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
// que[que_h++] = v;
// tmp_d[v] = 1;
// tmp_s[v].first = 1ULL << ns;
// if (++ns == 64) break;
// }
// }
// //}
//// }
//
// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
// for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) {
// VertexID v = que[que_i];
//// bit_parallel_push_labels(G,
//// v,
//// que,
//// que_h,
//// sibling_es,
//// num_sibling_es,
//// child_es,
//// num_child_es,
//// tmp_d,
//// d);
// EdgeID i_start = G.vertices_idx[v];
// EdgeID i_bound = i_start + G.local_out_degrees[v];
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv = G.out_edges[i];
// UnweightedDist td = d + 1;
//
// if (d > tmp_d[tv]) {
// ;
// }
// else if (d == tmp_d[tv]) {
// if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v;
// sibling_es[num_sibling_es].second = tv;
// ++num_sibling_es;
// }
// } else { // d < tmp_d[tv]
// if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) {
// que[que_h++] = tv;
// tmp_d[tv] = td;
// }
// child_es[num_child_es].first = v;
// child_es[num_child_es].second = tv;
// ++num_child_es;
// }
// }
// }
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first;
// tmp_s[w].second |= tmp_s[v].first;
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
//
// {// test
// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
//// if (4 == d) {
//// exit(EXIT_SUCCESS);
//// }
// }
//
// que_t0 = que_t1;
// que_t1 = que_h;
// }
//
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = tmp_d[v];
// L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
// L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//
//}
// Relax all local out-edges of v_global for the bit-parallel BFS at level
// `iter`, classifying each edge (v, tv) by tv's current distance:
//   dist == iter -> sibling edge (recorded once, only when v_global < tv_global);
//   dist >  iter -> child edge; tv is also discovered (enqueued) if unvisited.
// Results are written into the caller's slices of the shared tmp_* arrays
// starting at offset_tmp_q (per-call size counters), so many calls can run
// concurrently under OpenMP. Discovery uses CAS on dists so that each vertex
// is enqueued by exactly one thread.
// NOTE(review): offset_tmp_q offsets all three tmp arrays; each slice is
// sized by v_global's local out-degree (see the caller's prefix sums).
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_push_labels(
const DistGraph &G,
const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q,
VertexID &size_tmp_q,
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q,
std::vector<UnweightedDist> &dists,
const UnweightedDist iter)
{
// Local out-edge range of v_global in the CSR arrays.
EdgeID i_start = G.vertices_idx[v_global];
EdgeID i_bound = i_start + G.local_out_degrees[v_global];
// {//test
// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
// }
for (EdgeID i = i_start; i < i_bound; ++i) {
VertexID tv_global = G.out_edges[i];
VertexID tv_local = G.get_local_vertex_id(tv_global);
UnweightedDist td = iter + 1;
if (iter > dists[tv_local]) {
// tv is strictly closer to the root: nothing to do.
;
} else if (iter == dists[tv_local]) {
// Sibling edge; record only once per undirected edge.
if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
++size_tmp_sibling_es;
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
}
} else { // iter < dists[tv]
if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// CAS guarantees that exactly one thread claims tv and enqueues it.
if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
}
}
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
// Child edge is recorded regardless of who won the CAS.
tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
++size_tmp_child_es;
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
}
}
}
// Builds up to BITPARALLEL_SIZE bit-parallel shortest-path trees over the
// distributed graph G (bit-parallel labels in the style of pruned landmark
// labeling). For each tree: host 0 picks an unused root r and up to 64 of
// its unused neighbors; a level-synchronous distributed BFS then propagates
// distances (tmp_d, local masters only) and the 64-bit sets S_r^{-1} / S_r^{0}
// (tmp_s, indexed by global ID); the results are stored into L[*].bp_dist and
// L[*].bp_sets. Consumed roots/neighbors are marked in used_bp_roots.
//
// BUG FIX vs. the previous version: when the roots were exhausted, host 0
// executed `continue` BEFORE the MPI_Bcast, leaving every other host blocked
// in the broadcast (deadlock); the same path also wrote L[v] for all global
// v although L only holds num_masters entries (out-of-bounds write). Now the
// root is broadcast first, all hosts detect exhaustion together, and only the
// local masters' labels are written.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_labeling(
const DistGraph &G,
std::vector<uint8_t> &used_bp_roots)
{
// Message unit for exchanging a vertex's bit-parallel sets between hosts.
struct MsgUnitBP {
VertexID v_global;
uint64_t S_n1; // S_r^{-1}
uint64_t S_0; // S_r^{0}
MsgUnitBP() = default;
MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
: v_global(v), S_n1(sn1), S_0(s0) { }
};
EdgeID local_num_edges = G.num_edges_local;
std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every local master
std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
std::vector<VertexID> que(num_masters); // active queue (current BFS level)
VertexID end_que = 0;
std::vector<VertexID> tmp_que(num_masters); // next level's queue, to be swapped with que
VertexID end_tmp_que = 0;
std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.
VertexID r_global = 0; // root r
for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// Host 0 selects the next unused root; the choice (or num_v, meaning
// "exhausted") is then broadcast so that ALL hosts take the same path.
if (0 == host_id) {
while (r_global < num_v && used_bp_roots[r_global]) {
++r_global;
}
}
// message_time -= WallTimer::get_time_mark();
MPI_Bcast(&r_global,
1,
V_ID_Type,
0,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
if (r_global == num_v) {
// Roots exhausted: mark this BP slot unreachable for all local masters.
for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
L[v_local].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
}
continue;
}
used_bp_roots[r_global] = 1;
std::fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
std::fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
// Seed the BFS with the root on its master host.
if (G.get_master_host_id(r_global) == host_id) {
tmp_d[G.get_local_vertex_id(r_global)] = 0;
que[end_que++] = r_global;
}
// Select up to 64 of the root's neighbors as the bit-parallel set.
{
// Gather r_global's local neighbors, highest rank first (edges are
// stored in decreasing rank order, hence the backward traversal).
VertexID local_degree = G.local_out_degrees[r_global];
std::vector<VertexID> buffer_send(local_degree);
if (local_degree) {
EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
EdgeID e_i = e_i_start - d_i;
buffer_send[d_i] = G.out_edges[e_i];
}
}
std::vector<VertexID> selected_nbrs;
if (0 != host_id) {
// Non-zero hosts ship their neighbor lists to host 0 ...
// message_time -= WallTimer::get_time_mark();
MPI_Instance::send_buffer_2_dst(buffer_send,
0,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
// ... and get the selected (up to 64) neighbors back.
MPI_Instance::recv_buffer_from_src(selected_nbrs,
0,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
} else {
// Host 0 merges all hosts' (sorted) neighbor lists into all_nbrs.
std::vector<VertexID> all_nbrs(buffer_send);
std::vector<VertexID > buffer_recv;
for (int loc = 0; loc < num_hosts - 1; ++loc) {
// message_time -= WallTimer::get_time_mark();
MPI_Instance::recv_buffer_from_any(buffer_recv,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
if (buffer_recv.empty()) {
continue;
}
buffer_send.resize(buffer_send.size() + buffer_recv.size());
std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
all_nbrs.resize(buffer_send.size());
all_nbrs.assign(buffer_send.begin(), buffer_send.end());
}
assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// Pick the first (highest-ranked) unused neighbors, at most 64.
VertexID ns = 0; // number of selected neighbors
for (VertexID v_global : all_nbrs) {
if (used_bp_roots[v_global]) {
continue;
}
used_bp_roots[v_global] = 1;
selected_nbrs.push_back(v_global);
if (++ns == 64) {
break;
}
}
// Tell the other hosts which neighbors were selected.
// message_time -= WallTimer::get_time_mark();
for (int dest = 1; dest < num_hosts; ++dest) {
MPI_Instance::send_buffer_2_dst(selected_nbrs,
dest,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
}
// message_time += WallTimer::get_time_mark();
}
// Synchronize used_bp_roots across all hosts.
for (VertexID v_global : selected_nbrs) {
used_bp_roots[v_global] = 1;
}
// Each selected neighbor starts at distance 1 with S_r^{-1} = {itself}.
for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
VertexID v_global = selected_nbrs[v_i];
if (host_id != G.get_master_host_id(v_global)) {
continue;
}
tmp_que[end_tmp_que++] = v_global;
tmp_d[G.get_local_vertex_id(v_global)] = 1;
tmp_s[v_global].first = 1ULL << v_i;
}
}
// Level-synchronous BFS: global_num_actives is the number of active
// vertices over all hosts (1 initially, for the root itself).
VertexID global_num_actives = 1;
UnweightedDist d = 0;
while (global_num_actives) {
VertexID num_sibling_es = 0, num_child_es = 0;
// Broadcast this level's active masters (and their sets) to every host;
// each host pushes labels along its local out-edges in parallel.
{
std::vector<MsgUnitBP> buffer_send(end_que);
for (VertexID que_i = 0; que_i < end_que; ++que_i) {
VertexID v_global = que[que_i];
buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
}
for (int root = 0; root < num_hosts; ++root) {
std::vector<MsgUnitBP> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
// Per-message offsets into shared temporaries (sized by each
// message vertex's local out-degree) so that the parallel push
// writes without contention.
VertexID size_buffer_recv = buffer_recv.size();
std::vector<VertexID> offsets_tmp_q(size_buffer_recv);
#pragma omp parallel for
for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) {
offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global];
}
VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q);
std::vector<VertexID> tmp_q(num_neighbors);
std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0);
std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors);
std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0);
std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors);
std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0);
#pragma omp parallel for
for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
const MsgUnitBP &m = buffer_recv[i_m];
VertexID v_global = m.v_global;
if (!G.local_out_degrees[v_global]) {
continue;
}
tmp_s[v_global].first = m.S_n1;
tmp_s[v_global].second = m.S_0;
// Push labels along v_global's local out-edges.
bit_parallel_push_labels(
G,
v_global,
tmp_q,
sizes_tmp_q[i_m],
tmp_sibling_es,
sizes_tmp_sibling_es[i_m],
tmp_child_es,
sizes_tmp_child_es[i_m],
offsets_tmp_q[i_m],
tmp_d,
d);
}
{// From tmp_sibling_es to sibling_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es);
PADO::collect_into_queue(
tmp_sibling_es,
offsets_tmp_q,
sizes_tmp_sibling_es,
total_size_tmp,
sibling_es,
num_sibling_es);
}
{// From tmp_child_es to child_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es);
PADO::collect_into_queue(
tmp_child_es,
offsets_tmp_q,
sizes_tmp_child_es,
total_size_tmp,
child_es,
num_child_es);
}
{// From tmp_q to tmp_que
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q);
PADO::collect_into_queue(
tmp_q,
offsets_tmp_q,
sizes_tmp_q,
total_size_tmp,
tmp_que,
end_tmp_que);
}
}
}
// Update the sets in tmp_s from the collected sibling and child edges.
{
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first, w = sibling_es[i].second;
__atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST);
}
// Ship the updated S^{0} sets of sibling endpoints to the other hosts.
std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first;
VertexID w = sibling_es[i].second;
buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
}
for (int root = 0; root < num_hosts; ++root) {
std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
size_t i_m_bound = buffer_recv.size();
#pragma omp parallel for
for (size_t i_m = 0; i_m < i_m_bound; ++i_m) {
const auto &m = buffer_recv[i_m];
__atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST);
}
}
// Children inherit both sets from their parents.
#pragma omp parallel for
for (VertexID i = 0; i < num_child_es; ++i) {
VertexID v = child_es[i].first, c = child_es[i].second;
__atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST);
}
}
// Advance to the next BFS level and count the global active frontier.
tmp_que.swap(que);
end_que = end_tmp_que;
end_tmp_que = 0;
MPI_Allreduce(&end_que,
&global_num_actives,
1,
V_ID_Type,
MPI_SUM,
MPI_COMM_WORLD);
++d;
}
// Store this root's distances and sets into the local masters' labels.
#pragma omp parallel for
for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
VertexID v_global = G.get_global_vertex_id(v_local);
L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
}
}
}
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_push_labels(
// const DistGraph &G,
// const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// const UnweightedDist iter)
//{
// EdgeID i_start = G.vertices_idx[v_global];
// EdgeID i_bound = i_start + G.local_out_degrees[v_global];
//// {//test
//// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
//// }
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv_global = G.out_edges[i];
// VertexID tv_local = G.get_local_vertex_id(tv_global);
// UnweightedDist td = iter + 1;
//
// if (iter > dists[tv_local]) {
// ;
// } else if (iter == dists[tv_local]) {
// if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
// }
// } else { // iter < dists[tv]
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
//// {
//// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test
//// }
// }
// }
//
//}
//
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots)
//{
// // Class type of Bit-Parallel label message unit.
// struct MsgUnitBP {
// VertexID v_global;
// uint64_t S_n1;
// uint64_t S_0;
//
// MsgUnitBP() = default;
//// MsgUnitBP(MsgUnitBP&& other) = default;
//// MsgUnitBP(MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(const MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(MsgUnitBP&& other) = default;
// MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
// : v_global(v), S_n1(sn1), S_0(s0) { }
// };
//// VertexID num_v = G.num_v;
//// EdgeID num_e = G.num_e;
// EdgeID local_num_edges = G.num_edges_local;
//
// std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_masters); // active queue
// VertexID end_que = 0;
// std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
// VertexID end_tmp_que = 0;
// std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.
//
//// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
//// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
//// std::vector<VertexID> que(num_v); // active queue
//// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
//// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r_global = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// // Select the root r_global
// if (0 == host_id) {
// while (r_global < num_v && used_bp_roots[r_global]) {
// ++r_global;
// }
// if (r_global == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// }
// // Broadcast the r here.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&r_global,
// 1,
// V_ID_Type,
// 0,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
// }
// }
//#endif
//
//// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// // Mark the r_global
// if (G.get_master_host_id(r_global) == host_id) {
// tmp_d[G.get_local_vertex_id(r_global)] = 0;
// que[end_que++] = r_global;
// }
// // Select the r_global's 64 neighbors
// {
// // Get r_global's neighbors into buffer_send, rank from low to high.
// VertexID local_degree = G.local_out_degrees[r_global];
// std::vector<VertexID> buffer_send(local_degree);
// if (local_degree) {
// EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
// for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
// EdgeID e_i = e_i_start - d_i;
// buffer_send[d_i] = G.out_edges[e_i];
// }
// }
//
// // Get selected neighbors (up to 64)
// std::vector<VertexID> selected_nbrs;
// if (0 != host_id) {
// // Every host other than 0 sends neighbors to host 0
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// 0,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
// // Receive selected neighbors from host 0
// MPI_Instance::recv_buffer_from_src(selected_nbrs,
// 0,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// } else {
// // Host 0
// // Host 0 receives neighbors from others
// std::vector<VertexID> all_nbrs(buffer_send);
// std::vector<VertexID > buffer_recv;
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::recv_buffer_from_any(buffer_recv,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
//// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv,
//// num_hosts,
//// SENDING_ROOT_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// if (buffer_recv.empty()) {
// continue;
// }
//
// buffer_send.resize(buffer_send.size() + buffer_recv.size());
// std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
// all_nbrs.resize(buffer_send.size());
// all_nbrs.assign(buffer_send.begin(), buffer_send.end());
// }
// assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// // Select 64 (or less) neighbors
// VertexID ns = 0; // number of selected neighbor, default 64
// for (VertexID v_global : all_nbrs) {
// if (used_bp_roots[v_global]) {
// continue;
// }
// used_bp_roots[v_global] = 1;
// selected_nbrs.push_back(v_global);
// if (++ns == 64) {
// break;
// }
// }
// // Send selected neighbors to other hosts
// message_time -= WallTimer::get_time_mark();
// for (int dest = 1; dest < num_hosts; ++dest) {
// MPI_Instance::send_buffer_2_dst(selected_nbrs,
// dest,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// }
// message_time += WallTimer::get_time_mark();
// }
//// {//test
//// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
//// }
//
// // Synchronize the used_bp_roots.
// for (VertexID v_global : selected_nbrs) {
// used_bp_roots[v_global] = 1;
// }
//
// // Mark selected neighbors
// for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
// VertexID v_global = selected_nbrs[v_i];
// if (host_id != G.get_master_host_id(v_global)) {
// continue;
// }
// tmp_que[end_tmp_que++] = v_global;
// tmp_d[G.get_local_vertex_id(v_global)] = 1;
// tmp_s[v_global].first = 1ULL << v_i;
// }
// }
//
// // Reduce the global number of active vertices
// VertexID global_num_actives = 1;
// UnweightedDist d = 0;
// while (global_num_actives) {
//// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
//
// // Send active masters to mirrors
// {
// std::vector<MsgUnitBP> buffer_send(end_que);
// for (VertexID que_i = 0; que_i < end_que; ++que_i) {
// VertexID v_global = que[que_i];
// buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
// }
//// {// test
//// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
//// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgUnitBP> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgUnitBP &m : buffer_recv) {
// VertexID v_global = m.v_global;
// if (!G.local_out_degrees[v_global]) {
// continue;
// }
// tmp_s[v_global].first = m.S_n1;
// tmp_s[v_global].second = m.S_0;
// // Push labels
// bit_parallel_push_labels(G,
// v_global,
// tmp_que,
// end_tmp_que,
// sibling_es,
// num_sibling_es,
// child_es,
// num_child_es,
// tmp_d,
// d);
// }
//// {// test
//// printf("host_id: %u root: %u done push.\n", host_id, root);
//// }
// }
// }
//
// // Update the sets in tmp_s
// {
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
// tmp_s[w].second |= tmp_s[v].first;
//
// }
// // Put into the buffer sending to others
// std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
//// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1);
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first;
// VertexID w = sibling_es[i].second;
//// buffer_send.emplace_back(v, tmp_s[v].second);
//// buffer_send.emplace_back(w, tmp_s[w].second);
// buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
// buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
// }
// // Send the messages
// for (int root = 0; root < num_hosts; ++root) {
// std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
// tmp_s[m.first].second |= m.second;
// }
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
// }
////#ifdef DEBUG_MESSAGES_ON
// {// test
// VertexID global_num_sibling_es;
// VertexID global_num_child_es;
// MPI_Allreduce(&num_sibling_es,
// &global_num_sibling_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// MPI_Allreduce(&num_child_es,
// &global_num_child_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
// }
// }
////#endif
//
// // Swap que and tmp_que
// tmp_que.swap(que);
// end_que = end_tmp_que;
// end_tmp_que = 0;
// MPI_Allreduce(&end_que,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
//
//// }
// ++d;
// }
//
// for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
// VertexID v_global = G.get_global_vertex_id(v_local);
// L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
// L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
// L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//}
//// Function bit parallel checking:
//// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking(
// VertexID v_id,
// VertexID w_id,
// const std::vector<IndexType> &L,
// UnweightedDist iter)
//{
// // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already
// const IndexType &Lv = L[v_id];
// const IndexType &Lw = L[w_id];
//
// _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0);
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF.
// if (td - 2 <= iter) {
// td +=
// (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 :
// ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) |
// (Lv.bp_sets[i][1] & Lw.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
//// ++bp_hit_count;
// return false;
// }
// }
// }
// return true;
//}
// Function for initializing at the beginning of a batch.
// For the batch of root vertices [roots_start, roots_start + roots_size):
//  - reset the short_index indicator bits and once_candidated flags left over
//    from the previous batch (only for vertices recorded in once_candidated_queue);
//  - give every locally-mastered, non-bit-parallel root a self-label of
//    distance 0 in both short_index and the real label structure L;
//  - broadcast all local roots' existing labels between hosts so every host can
//    fill its dist_table (the distance buffer), recording received entries in
//    recved_dist_table for later reset;
//  - broadcast the roots' bit-parallel labels to populate bp_labels_table;
//  - enqueue the local root masters into active_queue.
// Returns the global number of active vertices (the MPI_Allreduce sum of
// end_active_queue over all hosts).
// NOTE(review): most loops have a serial branch and an OpenMP branch selected
// by THRESHOLD_PARALLEL; the two branches are intended to be semantically
// identical.
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
//        VertexID b_id,
        VertexID roots_start,
        VertexID roots_size,
//        std::vector<VertexID> &roots_master_local,
        const std::vector<uint8_t> &used_bp_roots)
{
    // Get the roots_master_local, containing all local roots.
    // A root is local when this host is its master, and it is skipped entirely
    // if it was already consumed as a bit-parallel root.
    std::vector<VertexID> roots_master_local;
    VertexID roots_bound = roots_start + roots_size;
    for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
        if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
            roots_master_local.push_back(G.get_local_vertex_id(r_global));
        }
    }
    VertexID size_roots_master_local = roots_master_local.size();
    // Short_index: clear stale indicators, then mark each local root as its
    // own label within the batch.
    {
        // Only vertices that were once candidated in the previous batch need
        // resetting; the queue bounds that work.
        if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        } else {
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        }
        end_once_candidated_queue = 0;
        // Mark each local root's own batch-relative bit: root knows itself.
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        }
    }
//
    // Real Index: append the root's self-label (distance 0) to its label list L.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                IndexType &Lr = L[r_local];
//                Lr.batches.emplace_back(
//                        b_id, // Batch ID
//                        Lr.distances.size(), // start_index
//                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                IndexType &Lr = L[r_local];
//                Lr.batches.emplace_back(
//                        b_id, // Batch ID
//                        Lr.distances.size(), // start_index
//                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        }
    }
    // Dist Table: flatten every local root's labels into (root_id, label, dist)
    // units, broadcast them host-by-host, and install them into dist_table.
    {
//        struct LabelTableUnit {
//            VertexID root_id;
//            VertexID label_global_id;
//            UnweightedDist dist;
//
//            LabelTableUnit() = default;
//
//            LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//                    root_id(r), label_global_id(l), dist(d) {}
//        };
        std::vector<LabelTableUnit> buffer_send; // buffer for sending
        // Dist_matrix
        {
            // Deprecated Old method: unpack the IndexType structure before sending.
            // Okay, it's back.
            if (size_roots_master_local >= THRESHOLD_PARALLEL) {
                // Offsets for adding labels to buffer_send in parallel.
                // Each root's label count becomes its slot size; a prefix sum
                // turns counts into exclusive start offsets.
                std::vector<VertexID> offsets_beffer_send(size_roots_master_local);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    offsets_beffer_send[i_r] = L[r_local].vertices.size();
                }
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send);
                buffer_send.resize(size_labels);
                // Each thread writes into its root's disjoint slot, so no
                // synchronization is needed here.
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    VertexID top_location = 0;
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
//                    VertexID b_i_bound = Lr.batches.size();
//                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
//                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
//                        VertexID dist_start_index = Lr.batches[b_i].start_index;
//                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                    // Traverse distances array
//                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                    VertexID dist_bound_index = Lr.distances.size();
                    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                        VertexID v_start_index = Lr.distances[dist_i].start_index;
                        VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                        UnweightedDist dist = Lr.distances[dist_i].dist;
                        // Traverse vertices array
                        for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                            // Write into the dist_table
//                            buffer_send[offsets_beffer_send[i_r] + top_location++] =
//                                    LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
                            buffer_send[offsets_beffer_send[i_r] + top_location++] =
                                    LabelTableUnit(r_root_id, Lr.vertices[v_i], dist);
                        }
                    }
//                    }
                }
            } else {
                for (VertexID r_local : roots_master_local) {
                    // The distance table.
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
//                    VertexID b_i_bound = Lr.batches.size();
//                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
//                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
//                        VertexID dist_start_index = Lr.batches[b_i].start_index;
//                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                    // Traverse distances array
//                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                    VertexID dist_bound_index = Lr.distances.size();
                    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                        VertexID v_start_index = Lr.distances[dist_i].start_index;
                        VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                        UnweightedDist dist = Lr.distances[dist_i].dist;
                        // Traverse vertices array
                        for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                            // Write into the dist_table
                            buffer_send.emplace_back(r_root_id, Lr.vertices[v_i],
                                                     dist); // buffer for sending
//                            buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
//                                                     dist); // buffer for sending
                        }
                    }
//                    }
                }
            }
        }
        // Broadcast local roots labels: every host takes a turn as broadcast
        // root; all hosts install everything they receive.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<LabelTableUnit> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                                             buffer_send,
                                             buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            EdgeID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                // Pass 1: write dists and count labels per root (atomically,
                // since multiple threads may hit the same root).
                std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record root_id's number of its received label, for later adding to recved_dist_table
                    __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
//                    recved_dist_table[root_id].push_back(label_global_id);
                }
                // Pass 2: pre-size recved_dist_table per root, then zero the
                // counters so they can serve as insertion cursors below.
                // Record the received label in recved_dist_table, for later reset
#pragma omp parallel for
                for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                    VertexID &size = sizes_recved_root_labels[root_id];
                    if (size) {
                        recved_dist_table[root_id].resize(size);
                        size = 0;
                    }
                }
                // Pass 3: thread-safe enqueue of label ids into the
                // pre-sized per-root vectors.
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
                }
            } else {
                for (const LabelTableUnit &l : buffer_recv) {
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record the received label in recved_dist_table, for later reset
                    recved_dist_table[root_id].push_back(label_global_id);
                }
            }
        }
    }
    // Build the Bit-Parallel Labels Table: each host broadcasts the BP labels
    // of its mastered roots; every host copies received entries into
    // bp_labels_table indexed by batch-relative root id.
    {
//        struct MsgBPLabel {
//            VertexID r_root_id;
//            UnweightedDist bp_dist[BITPARALLEL_SIZE];
//            uint64_t bp_sets[BITPARALLEL_SIZE][2];
//
//            MsgBPLabel() = default;
//            MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//                    : r_root_id(r)
//            {
//                memcpy(bp_dist, dist, sizeof(bp_dist));
//                memcpy(bp_sets, sets, sizeof(bp_sets));
//            }
//        };
//        std::vector<MPI_Request> requests_send(num_hosts - 1);
        std::vector<MsgBPLabel> buffer_send;
        std::vector<VertexID> roots_queue;
        // Collect all roots mastered by this host (note: unlike
        // roots_master_local, used_bp_roots is not filtered out here).
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) != host_id) {
                continue;
            }
            roots_queue.push_back(r_global);
        }
        VertexID size_roots_queue = roots_queue.size();
        if (size_roots_queue >= THRESHOLD_PARALLEL) {
            buffer_send.resize(size_roots_queue);
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
                VertexID r_global = roots_queue[i_r];
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Prepare for sending
//                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
                buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        } else {
//            for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
//                if (G.get_master_host_id(r_global) != host_id) {
//                    continue;
//                }
            for (VertexID r_global : roots_queue) {
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Local roots
//                memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//                memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                // Prepare for sending
                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        }
        // Round-robin broadcast; every host (including the sender) installs
        // the received BP labels into bp_labels_table.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<MsgBPLabel> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                                             buffer_send,
                                             buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            VertexID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
                for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
                    const MsgBPLabel &m = buffer_recv[i_m];
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            } else {
                for (const MsgBPLabel &m : buffer_recv) {
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            }
        }
    }
    // Active_queue: all local root masters start out active.
    VertexID global_num_actives = 0; // global number of active vertices.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                active_queue[i_r] = r_local;
            }
            end_active_queue = size_roots_master_local;
        } else {
            for (VertexID r_local : roots_master_local) {
                active_queue[end_active_queue++] = r_local;
            }
        }
        // Get the global number of active vertices;
//        message_time -= WallTimer::get_time_mark();
        MPI_Allreduce(&end_active_queue,
                      &global_num_actives,
                      1,
                      V_ID_Type,
                      MPI_SUM,
                      MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
    }
    return global_num_actives;
}
// Sequential Version
//// Function for initializing at the begin of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated,
// VertexID b_id,
// VertexID roots_start,
// VertexID roots_size,
//// std::vector<VertexID> &roots_master_local,
// const std::vector<uint8_t> &used_bp_roots)
//{
// // Get the roots_master_local, containing all local roots.
// std::vector<VertexID> roots_master_local;
// VertexID roots_bound = roots_start + roots_size;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
// roots_master_local.push_back(G.get_local_vertex_id(r_global));
// }
// }
// // Short_index
// {
// for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
// VertexID v_local = once_candidated_queue[v_i];
// short_index[v_local].indicator_reset();
// once_candidated[v_local] = 0;
// }
// end_once_candidated_queue = 0;
// for (VertexID r_local : roots_master_local) {
// short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
//// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself
//// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels
// }
// }
////
// // Real Index
// {
// for (VertexID r_local : roots_master_local) {
// IndexType &Lr = L[r_local];
// Lr.batches.emplace_back(
// b_id, // Batch ID
// Lr.distances.size(), // start_index
// 1); // size
// Lr.distances.emplace_back(
// Lr.vertices.size(), // start_index
// 1, // size
// 0); // dist
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
// }
// }
//
// // Dist Table
// {
//// struct LabelTableUnit {
//// VertexID root_id;
//// VertexID label_global_id;
//// UnweightedDist dist;
////
//// LabelTableUnit() = default;
////
//// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//// root_id(r), label_global_id(l), dist(d) {}
//// };
// std::vector<LabelTableUnit> buffer_send; // buffer for sending
// // Dist_matrix
// {
// // Deprecated Old method: unpack the IndexType structure before sending.
// for (VertexID r_local : roots_master_local) {
// // The distance table.
// IndexType &Lr = L[r_local];
// VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
// VertexID b_i_bound = Lr.batches.size();
// _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// // Traverse batches array
// for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
// VertexID dist_start_index = Lr.batches[b_i].start_index;
// VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// // Traverse distances array
// for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID v_start_index = Lr.distances[dist_i].start_index;
// VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
// UnweightedDist dist = Lr.distances[dist_i].dist;
// // Traverse vertices array
// for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// // Write into the dist_table
//// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table
// buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
// dist); // buffer for sending
// }
// }
// }
// }
// }
// // Broadcast local roots labels
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<LabelTableUnit> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const LabelTableUnit &l : buffer_recv) {
// VertexID root_id = l.root_id;
// VertexID label_global_id = l.label_global_id;
// UnweightedDist dist = l.dist;
// dist_table[root_id][label_global_id] = dist;
// // Record the received label in recved_dist_table, for later reset
// recved_dist_table[root_id].push_back(label_global_id);
// }
// }
// }
//
// // Build the Bit-Parallel Labels Table
// {
//// struct MsgBPLabel {
//// VertexID r_root_id;
//// UnweightedDist bp_dist[BITPARALLEL_SIZE];
//// uint64_t bp_sets[BITPARALLEL_SIZE][2];
////
//// MsgBPLabel() = default;
//// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//// : r_root_id(r)
//// {
//// memcpy(bp_dist, dist, sizeof(bp_dist));
//// memcpy(bp_sets, sets, sizeof(bp_sets));
//// }
//// };
//// std::vector<MPI_Request> requests_send(num_hosts - 1);
// std::vector<MsgBPLabel> buffer_send;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) != host_id) {
// continue;
// }
// VertexID r_local = G.get_local_vertex_id(r_global);
// VertexID r_root = r_global - roots_start;
// // Local roots
//// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// // Prepare for sending
// buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgBPLabel> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgBPLabel &m : buffer_recv) {
// VertexID r_root = m.r_root_id;
// memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
// memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// }
// }
// }
//
// // TODO: parallel enqueue
// // Active_queue
// VertexID global_num_actives = 0; // global number of active vertices.
// {
// for (VertexID r_local : roots_master_local) {
// active_queue[end_active_queue++] = r_local;
// }
// // Get the global number of active vertices;
// message_time -= WallTimer::get_time_mark();
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// }
//
// return global_num_actives;
//}
//// Function: push v_head_global's newly added labels to its all neighbors.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// VertexID label_global_id = label_root_id + roots_start;
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// if (v_tail_global <= label_global_id) {
// // remaining v_tail_global has higher rank than the label
// return;
// }
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
//// {// Just for the complain from the compiler
//// assert(iter >= iter);
//// }
//}
// Function: one parallel round of label pushing across all hosts.
// Phase 1: flatten this host's active vertices' newly inserted labels into two
// send buffers (an index list of (global vertex ID, label count) pairs, and the
// flattened label root IDs). Phase 2: every host in turn broadcasts its buffers;
// all hosts push the received labels to their local out-neighbors in parallel
// via local_push_labels_para(), then collect the per-thread temporary queues
// into got_candidates_queue and once_candidated_queue.
// NOTE(review): this host only contributes active_queue[global_start,
// global_start + min(global_size, local_size)) -- presumably the caller chunks
// the active queue across calls; confirm against the call site.
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
const DistGraph &G,
const VertexID roots_start,
const std::vector<uint8_t> &used_bp_roots,
const std::vector<VertexID> &active_queue,
const VertexID global_start,
const VertexID global_size,
const VertexID local_size,
// const VertexID start_active_queue,
// const VertexID size_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
const std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
std::vector<uint8_t> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const UnweightedDist iter)
{
std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
//.first: Vertex ID
//.second: size of labels
std::vector<VertexID> buffer_send_labels;
// Phase 1: build the send buffers from this host's slice of the active queue.
if (local_size) {
const VertexID start_active_queue = global_start;
const VertexID size_active_queue = global_size <= local_size ?
global_size :
local_size;
const VertexID bound_active_queue = start_active_queue + size_active_queue;
buffer_send_indices.resize(size_active_queue);
// Prepare offset for inserting
std::vector<VertexID> offsets_buffer_locs(size_active_queue);
// First pass only gathers each vertex's newly-inserted label count
// (the size of the last distance group in its label index).
#pragma omp parallel for
for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
VertexID v_head_local = active_queue[i_q];
// NOTE(review): is_active is reset again in the second loop below;
// the duplicate reset is harmless but redundant.
is_active[v_head_local] = 0; // reset is_active
const IndexType &Lv = L[v_head_local];
offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
}
// Exclusive prefix sum turns per-vertex counts into write offsets and
// returns the total number of labels to send.
EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
// {// test
// if (0 == host_id) {
// double memtotal = 0;
// double memfree = 0;
// double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
// PADO::Utils::system_memory(memtotal, memfree);
// printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
// bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
// }
// }
buffer_send_labels.resize(size_buffer_send_labels);
// {// test
// if (0 == host_id) {
// printf("buffer_send_labels created.\n");
// }
// }
// Build buffer_send_labels by parallel inserting
#pragma omp parallel for
for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
VertexID v_head_local = active_queue[i_q];
is_active[v_head_local] = 0; // reset is_active
VertexID v_head_global = G.get_global_vertex_id(v_head_local);
const IndexType &Lv = L[v_head_local];
// Prepare the buffer_send_indices
VertexID tmp_i_q = i_q - start_active_queue;
buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// These 2 index are used for traversing v_head's last inserted labels
VertexID l_i_start = Lv.distances.rbegin()->start_index;
VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
VertexID top_labels = offsets_buffer_locs[tmp_i_q];
for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// Labels are sent as root-relative IDs (global ID minus roots_start).
VertexID label_root_id = Lv.vertices[l_i] - roots_start;
buffer_send_labels[top_labels++] = label_root_id;
// buffer_send_labels.push_back(label_root_id);
}
}
}
////////////////////////////////////////////////
////
// const VertexID bound_active_queue = start_active_queue + size_active_queue;
// std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue);
// //.first: Vertex ID
// //.second: size of labels
// std::vector<VertexID> buffer_send_labels;
// // Prepare masters' newly added labels for sending
// // Parallel Version
// // Prepare offset for inserting
// std::vector<VertexID> offsets_buffer_locs(size_active_queue);
//#pragma omp parallel for
// for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// const IndexType &Lv = L[v_head_local];
// offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
// }
// EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
//// {// test
//// if (0 == host_id) {
//// double memtotal = 0;
//// double memfree = 0;
//// double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
//// PADO::Utils::system_memory(memtotal, memfree);
//// printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
//// bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
//// }
//// }
// buffer_send_labels.resize(size_buffer_send_labels);
//// {// test
//// if (0 == host_id) {
//// printf("buffer_send_labels created.\n");
//// }
//// }
//
// // Build buffer_send_labels by parallel inserting
//#pragma omp parallel for
// for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
// VertexID tmp_i_q = i_q - start_active_queue;
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // Prepare the buffer_send_indices
// buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// VertexID top_labels = offsets_buffer_locs[tmp_i_q];
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send_labels[top_labels++] = label_root_id;
//// buffer_send_labels.push_back(label_root_id);
// }
// }
//// end_active_queue = 0;
////
////////////////////////////////////////////////
// Phase 2: each host in turn broadcasts its buffers; every host processes
// what it receives (including its own data in its own round).
for (int root = 0; root < num_hosts; ++root) {
// Get the indices
std::vector<std::pair<VertexID, VertexID> > indices_buffer;
one_host_bcasts_buffer_to_buffer(root,
buffer_send_indices,
indices_buffer);
if (indices_buffer.empty()) {
continue;
}
// Get the labels
std::vector<VertexID> labels_buffer;
one_host_bcasts_buffer_to_buffer(root,
buffer_send_labels,
labels_buffer);
VertexID size_indices_buffer = indices_buffer.size();
// Prepare the offsets for reading indices_buffer
std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
starts_locs_index[i_i] = e.second;
}
EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
// Prepare the offsets for inserting v_tails into queue
// Each sender vertex gets a private region of the temporary queues sized
// by its local out-degree, so threads can append without synchronization.
std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
}
EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
std::vector<VertexID> tmp_got_candidates_queue(num_ngbrs);
std::vector<VertexID> sizes_tmp_got_candidates_queue(size_indices_buffer, 0);
std::vector<VertexID> tmp_once_candidated_queue(num_ngbrs);
std::vector<VertexID> sizes_tmp_once_candidated_queue(size_indices_buffer, 0);
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
VertexID v_head_global = indices_buffer[i_i].first;
// [start_index, bound_index) is this vertex's slice of labels_buffer.
EdgeID start_index = starts_locs_index[i_i];
EdgeID bound_index = i_i != size_indices_buffer - 1 ?
starts_locs_index[i_i + 1] : total_recved_labels;
if (G.local_out_degrees[v_head_global]) {
local_push_labels_para(
v_head_global,
start_index,
bound_index,
roots_start,
labels_buffer,
G,
short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
tmp_got_candidates_queue,
sizes_tmp_got_candidates_queue[i_i],
offsets_tmp_queue[i_i],
got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
tmp_once_candidated_queue,
sizes_tmp_once_candidated_queue[i_i],
once_candidated,
bp_labels_table,
used_bp_roots,
iter);
}
}
{// Collect elements from tmp_got_candidates_queue to got_candidates_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
PADO::collect_into_queue(
tmp_got_candidates_queue,
offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
total_new,
got_candidates_queue,
end_got_candidates_queue);
}
{// Collect elements from tmp_once_candidated_queue to once_candidated_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
PADO::collect_into_queue(
tmp_once_candidated_queue,
offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue
sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
total_new,
once_candidated_queue,
end_once_candidated_queue);
}
}
}
// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Parallel (thread-safe) variant: called concurrently for many head vertices,
// so all shared ShortIndex / flag updates go through compare-and-swap, and new
// queue entries are written into this call's private region of the temporary
// queues: [offset_tmp_queue, offset_tmp_queue + size_tmp_*_queue).
// Parameters:
//   v_head_global           -- the sender vertex (global ID)
//   [start_index, bound_index) -- slice of labels_buffer holding its labels
//   roots_start             -- first root's global ID (labels are root-relative)
//   size_tmp_*_queue        -- per-call running sizes, owned by one thread each
//   iter                    -- current BFS level (distance of the new labels)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
const VertexID v_head_global,
const EdgeID start_index,
const EdgeID bound_index,
const VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
std::vector<VertexID> &tmp_got_candidates_queue,
VertexID &size_tmp_got_candidates_queue,
const VertexID offset_tmp_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
std::vector<VertexID> &tmp_once_candidated_queue,
VertexID &size_tmp_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter)
{
// Traverse v_head's every neighbor v_tail
EdgeID e_i_start = G.vertices_idx[v_head_global];
EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
VertexID v_tail_global = G.out_edges[e_i];
if (used_bp_roots[v_tail_global]) {
continue;
}
// NOTE(review): this early return assumes G.out_edges lists neighbors in
// ascending global-ID (i.e. rank) order -- confirm against DistGraph.
if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
return;
}
VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
const IndexType &L_tail = L[v_tail_local];
ShortIndex &SI_v_tail = short_index[v_tail_local];
// Traverse v_head's last inserted labels
for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
VertexID label_root_id = labels_buffer[l_i];
VertexID label_global_id = label_root_id + roots_start;
if (v_tail_global <= label_global_id) {
// v_tail_global has higher rank than the label
continue;
}
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator[label_root_id] = 1;
{// Deal with race condition
// CAS 0->1 both tests and claims the indicator atomically, so only
// one thread ever processes this (tail, label) pair.
if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id, static_cast<uint8_t>(0),
static_cast<uint8_t>(1))) {
// The label is already selected before
continue;
}
}
// Add into once_candidated_queue
if (!once_candidated[v_tail_local]) {
// If v_tail_global is not in the once_candidated_queue yet, add it in
// The winning CAS thread alone enqueues, preventing duplicates.
if (PADO::CAS(once_candidated.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
}
// once_candidated[v_tail_local] = 1;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
}
// Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// const IndexType &L_label = L[label_global_id];
// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
const BPLabelType &L_label = bp_labels_table[label_root_id];
bool no_need_add = false;
for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// td = distance via bit-parallel root i; the set intersections below
// can tighten it by 1 or 2 (shared neighbors of the BP root).
VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
if (td - 2 <= iter) {
td +=
(L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
(L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
? -1 : 0;
if (td <= iter) {
no_need_add = true;
break;
}
}
}
if (no_need_add) {
continue;
}
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = 1;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
if (!SI_v_tail.is_candidate[label_root_id]) {
if (CAS(SI_v_tail.is_candidate.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
// candidates_que is shared between threads, so use the
// thread-safe enqueue helper.
PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
}
}
// Add into got_candidates queue
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = 1;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
if (!got_candidates[v_tail_local]) {
if (CAS(got_candidates.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
}
}
}
}
// {
// assert(iter >= iter);
// }
}
// Function: pushes v_head's labels to every local (master) neighbor of v_head.
// Sequential variant: appends directly to the shared queues, no atomics needed.
// [start_index, bound_index) is v_head's slice of labels_buffer; each entry is
// a root-relative label ID (global ID = entry + roots_start).
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
VertexID v_head_global,
EdgeID start_index,
EdgeID bound_index,
VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter)
{
    const EdgeID first_edge = G.vertices_idx[v_head_global];
    const EdgeID last_edge = first_edge + G.local_out_degrees[v_head_global];
    // Visit every local out-neighbor of v_head.
    for (EdgeID edge_i = first_edge; edge_i < last_edge; ++edge_i) {
        const VertexID tail_global = G.out_edges[edge_i];
        if (used_bp_roots[tail_global]) {
            // Bit-parallel roots keep no normal labels; skip.
            continue;
        }
        if (tail_global < roots_start) {
            // This neighbor (and all remaining ones) outranks every root,
            // so no root can push a label to it; stop here.
            return;
        }
        // Visit every label v_head is pushing in this round.
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            const VertexID label_root = labels_buffer[l_i];
            const VertexID label_global = label_root + roots_start;
            if (tail_global <= label_global) {
                // The neighbor outranks (or is) the label's vertex.
                continue;
            }
            const VertexID tail_local = G.get_local_vertex_id(tail_global);
            const IndexType &L_tail = L[tail_local];
            ShortIndex &si_tail = short_index[tail_local];
            if (si_tail.indicator[label_root]) {
                // This label was already offered to the neighbor before.
                continue;
            }
            // Mark the label as once selected by this neighbor.
            si_tail.indicator[label_root] = 1;
            if (!once_candidated[tail_local]) {
                // First time the neighbor becomes a candidate in this batch.
                once_candidated[tail_local] = 1;
                once_candidated_queue[end_once_candidated_queue++] = tail_local;
            }
            // Bit-parallel pruning: if some BP root already certifies a path
            // of length <= iter between the label and the neighbor, skip it.
            const BPLabelType &L_label = bp_labels_table[label_root];
            bool pruned = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                if (td - 2 <= iter) {
                    // Shared (neighbors of the) BP root can shorten td by 1 or 2.
                    td +=
                        (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                        ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                         (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                        ? -1 : 0;
                    if (td <= iter) {
                        pruned = true;
                        break;
                    }
                }
            }
            if (pruned) {
                continue;
            }
            if (si_tail.is_candidate[label_root]) {
                continue;
            }
            // Record the label as a candidate for the neighbor.
            si_tail.is_candidate[label_root] = 1;
            si_tail.candidates_que[si_tail.end_candidates_que++] = label_root;
            if (!got_candidates[tail_local]) {
                // Enqueue the neighbor once (prevent duplicates).
                got_candidates[tail_local] = 1;
                got_candidates_queue[end_got_candidates_queue++] = tail_local;
            }
        }
    }
}
//// Function: pushes v_head's labels to v_head's every (master) neighbor
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// // The data structure of a message
//// std::vector< LabelUnitType > buffer_recv;
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin() -> start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size;
// // Traverse v_head's every neighbor v_tail
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// // Traverse v_head's last inserted labels
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// VertexID label_global_id = label_root_id + roots_start;
// if (v_tail_global <= label_global_id) {
// // v_tail_global has higher rank than the label
// continue;
// }
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
//
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
// }
//
// {
// assert(iter >= iter);
// }
//}
//// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts
//// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all
//// code of this function into the caller, all messages become right.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//sync_masters_2_mirrors(
// const DistGraph &G,
// const std::vector<VertexID> &active_queue,
// VertexID end_active_queue,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send,
// std::vector<MPI_Request> &requests_send
//)
//{
//// std::vector< std::pair<VertexID, VertexID> > buffer_send;
// // pair.first: Owener vertex ID of the label
// // pair.first: label vertex ID of the label
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send.emplace_back(v_head_global, label_root_id);
//// {//test
//// if (1 == host_id) {
//// printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);//
//// }
//// }
// }
// }
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// assert(!requests_send.empty());
// }
//
// // Send messages
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc);
// MPI_Isend(buffer_send.data(),
// MPI_Instance::get_sending_size(buffer_send),
// MPI_CHAR,
// dest_host_id,
// SENDING_MASTERS_TO_MIRRORS,
// MPI_COMM_WORLD,
// &requests_send[loc]);
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// }
// }
//}
// Function for distance query: scan vertex v_id's existing labels and test
// whether some label hub v already gives dist(v_id, v) + dist(v, cand) <= iter.
// Returns false when such a shorter (or equal) path exists -- the candidate is
// pruned; returns true when cand_root_id may be added to v_id's labels.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
// const std::vector<IndexType> &L,
const std::vector< std::vector<UnweightedDist> > &dist_table,
UnweightedDist iter)
{
    const VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];
    // Warm the caches for the label arrays we are about to scan.
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    const VertexID num_groups = Lv.distances.size();
    // Each distances element describes one group of hubs sharing a distance.
    for (VertexID g_i = 0; g_i < num_groups; ++g_i) {
        const UnweightedDist half_dist = Lv.distances[g_i].dist;
        const VertexID hub_start = Lv.distances[g_i].start_index;
        const VertexID hub_bound = hub_start + Lv.distances[g_i].size;
        _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
        for (VertexID h_i = hub_start; h_i < hub_bound; ++h_i) {
            const VertexID hub = Lv.vertices[h_i]; // a label hub of v_id
            if (hub >= cand_real_id) {
                // cand_real_id cannot have labels of lower rank than itself,
                // so dist_table[cand_root_id][hub] would not exist.
                continue;
            }
            const VertexID round_trip = half_dist + dist_table[cand_root_id][hub];
            if (round_trip <= iter) {
                // An existing path is already at least as short: prune.
                return false;
            }
        }
    }
    return true;
}
//// Sequential version
// Inserts candidate cand_root_id into vertex v_id's labels.
// Only the labels' vertices array is updated here; the matching distance-index
// entry is added later by update_label_indices(). If v_id is itself one of this
// batch's roots, the (root, label) pair is queued in buffer_send so the
// dist_table update can be broadcast to other hosts afterwards.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
std::vector< std::pair<VertexID, VertexID> > &buffer_send)
{
    // Store the label under its global vertex ID.
    const VertexID cand_real_id = cand_root_id + roots_start;
    L[v_id_local].vertices.push_back(cand_real_id);
    // Queue the distance-table update when v_id is a root of this batch.
    const VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    const VertexID v_root_id = v_id_global - roots_start;
    if (v_id_global >= roots_start && v_root_id < roots_size) {
        buffer_send.emplace_back(v_root_id, cand_real_id);
    }
}
//// Parallel Version
// Same contract as insert_label_only_seq, but safe for concurrent callers:
// instead of push_back on a shared buffer, the queued update is written into
// this call's private region of tmp_buffer_send, starting at
// offset_tmp_buffer_send and tracked by size_tmp_buffer_send (owned by one
// thread each), so no synchronization is needed.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send)
std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
EdgeID &size_tmp_buffer_send,
const EdgeID offset_tmp_buffer_send)
{
    // Store the label under its global vertex ID.
    const VertexID cand_real_id = cand_root_id + roots_start;
    L[v_id_local].vertices.push_back(cand_real_id);
    // Queue the distance-table update when v_id is a root of this batch.
    const VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    const VertexID v_root_id = v_id_global - roots_start;
    if (v_id_global >= roots_start && v_root_id < roots_size) {
        tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] =
                std::make_pair(v_root_id, cand_real_id);
    }
}
// Function updates v_id's label index after new labels have been inserted:
// appends one distance element recording that the last `inserted_count`
// entries of the vertices array were added at distance `iter`.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
const VertexID v_id_local,
const VertexID inserted_count,
const UnweightedDist iter)
{
    IndexType &Lv = L[v_id_local];
    // The freshly inserted labels occupy the tail of Lv.vertices.
    const VertexID group_start = Lv.vertices.size() - inserted_count;
    Lv.distances.emplace_back(
            group_start,    // start index
            inserted_count, // size
            iter);          // distance
}
// Reset the distance buffer dist_table back to infinity and zero the
// bit-parallel labels table at the end of a batch. Instead of wiping the
// whole dist_table, only the entries recorded in recved_dist_table (labels
// received from other hosts during this batch) are reset, which keeps the
// cost proportional to the batch's actual work rather than the table size.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table)
{
    // Undo exactly the dist_table entries set during this batch, then empty
    // each root's received-labels list for reuse by the next batch.
    for (VertexID r = 0; r < BATCH_SIZE; ++r) {
        std::vector<VertexID> &recved = recved_dist_table[r];
        for (const VertexID cand_real_id : recved) {
            dist_table[r][cand_real_id] = MAX_UNWEIGHTED_DIST;
        }
        recved.clear();
    }
    // Zero out every root's bit-parallel label arrays.
    for (VertexID r = 0; r < BATCH_SIZE; ++r) {
        BPLabelType &bp = bp_labels_table[r];
        memset(bp.bp_dist, 0, sizeof(bp.bp_dist));
        memset(bp.bp_sets, 0, sizeof(bp.bp_sets));
    }
}
// Process one batch of roots on this host. Repeats a synchronized
// scatter/gather loop until no host has active vertices:
//   scatter: active vertices push their newest labels to neighbors as
//            candidates (done in bounded-size chunks);
//   gather:  candidate vertices run distance queries, insert surviving
//            labels, and all hosts sync the dist_table via broadcasts.
// Finally the shared tables are reset for the next batch via reset_at_end().
// All queue "end_*" counters are in/out parameters owned by the caller.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
//        const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
//        std::vector<bool> &got_candidates,
        std::vector<uint8_t> &is_active,
//        std::vector<bool> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
//        std::vector<bool> &once_candidated)
{
    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
//    initializing_time -= WallTimer::get_time_mark();
    VertexID global_num_actives = initialization(G,
            short_index,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            active_queue,
            end_active_queue,
            once_candidated_queue,
            end_once_candidated_queue,
            once_candidated,
//            b_id,
            roots_start,
            roots_size,
//            roots_master_local,
            used_bp_roots);
//    initializing_time += WallTimer::get_time_mark();
    UnweightedDist iter = 0; // The iterator, also the distance for current iteration
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u initialization finished.\n", host_id);
//        }
//    }
    // Loop until no host reports active vertices (global_num_actives is the
    // Allreduce'd value, so every host sees the same loop condition).
    while (global_num_actives) {
        ++iter;
//#ifdef DEBUG_MESSAGES_ON
//        {//test
////            if (0 == host_id) {
//                double memtotal = 0;
//                double memfree = 0;
//                PADO::Utils::system_memory(memtotal, memfree);
//                printf("iter: %u "
//                       "host_id: %d "
//                       "global_num_actives: %u "
//                       "L.size(): %.2fGB "
//                       "memtotal: %.2fGB "
//                       "memfree: %.2fGB\n",
//                        iter,
//                        host_id,
//                        global_num_actives,
//                        get_index_size() * 1.0 / (1 << 30),
//                        memtotal / 1024,
//                        memfree / 1024);
////            }
//        }
//#endif
        // Traverse active vertices to push their labels as candidates
        // Send masters' newly added labels to other hosts
        {
//            scatter_time -= WallTimer::get_time_mark();
            // Divide the pushing into many-time runs.
            // The chunk loop is driven by the global active count, not the
            // local one, so every host executes the same number of rounds
            // (presumably schedule_label_pushing_para performs collectives
            // that must stay matched across hosts — verify in its body).
            // Hosts whose local queue is already exhausted pass local_size 0.
            const VertexID chunk_size = 1 << 24;
            VertexID remainder = global_num_actives % chunk_size;
            VertexID bound_global_i = global_num_actives - remainder;
//            VertexID remainder = end_active_queue % chunk_size;
//            VertexID bound_active_queue = end_active_queue - remainder;
            VertexID local_size;
            for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) {
                // local_size: how much of this host's queue falls in the chunk.
                if (global_i < end_active_queue) {
                    local_size = end_active_queue - global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        global_i,
                        chunk_size,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
            // Handle the final partial chunk, if any.
            if (remainder) {
                if (bound_global_i < end_active_queue) {
                    local_size = end_active_queue - bound_global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        bound_global_i,
                        remainder,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
//
//            schedule_label_pushing_para(
//                    G,
//                    roots_start,
//                    used_bp_roots,
//                    active_queue,
//                    0,
//                    end_active_queue,
//                    got_candidates_queue,
//                    end_got_candidates_queue,
//                    short_index,
//                    bp_labels_table,
//                    got_candidates,
//                    is_active,
//                    once_candidated_queue,
//                    end_once_candidated_queue,
//                    once_candidated,
//                    iter);
            // All local actives have been pushed; empty the queue.
            end_active_queue = 0;
//            scatter_time += WallTimer::get_time_mark();
        }
//// For Backup
//        {
//            scatter_time -= WallTimer::get_time_mark();
//            std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
//                //.first: Vertex ID
//                //.second: size of labels
//            std::vector<VertexID> buffer_send_labels;
//            // Prepare masters' newly added labels for sending
//            if (end_active_queue >= THRESHOLD_PARALLEL) {
//                // Parallel Version
//                // Prepare offset for inserting
//                std::vector<VertexID> offsets_buffer_locs(end_active_queue);
//#pragma omp parallel for
//                for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//                    VertexID v_head_local = active_queue[i_q];
//                    is_active[v_head_local] = 0; // reset is_active
//                    const IndexType &Lv = L[v_head_local];
//                    offsets_buffer_locs[i_q] = Lv.distances.rbegin()->size;
//                }
//                EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
//                buffer_send_labels.resize(size_buffer_send_labels);
//#pragma omp parallel for
//                for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//                    VertexID top_labels = 0;
//                    VertexID v_head_local = active_queue[i_q];
//                    is_active[v_head_local] = 0; // reset is_active
//                    VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//                    const IndexType &Lv = L[v_head_local];
//                    // Prepare the buffer_send_indices
//                    buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//                    // These 2 index are used for traversing v_head's last inserted labels
//                    VertexID l_i_start = Lv.distances.rbegin()->start_index;
//                    VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//                    for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//                        VertexID label_root_id = Lv.vertices[l_i];
//                        buffer_send_labels[offsets_buffer_locs[i_q] + top_labels++] = label_root_id;
////                        buffer_send_labels.push_back(label_root_id);
//                    }
//                }
//            } else {
//                // Sequential Version
//                for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//                    VertexID v_head_local = active_queue[i_q];
//                    is_active[v_head_local] = 0; // reset is_active
//                    VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//                    const IndexType &Lv = L[v_head_local];
//                    // Prepare the buffer_send_indices
//                    buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//                    // These 2 index are used for traversing v_head's last inserted labels
//                    VertexID l_i_start = Lv.distances.rbegin()->start_index;
//                    VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//                    for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//                        VertexID label_root_id = Lv.vertices[l_i];
//                        buffer_send_labels.push_back(label_root_id);
//                    }
//                }
//            }
//            end_active_queue = 0;
//
//            for (int root = 0; root < num_hosts; ++root) {
//                // Get the indices
//                std::vector< std::pair<VertexID, VertexID> > indices_buffer;
//                one_host_bcasts_buffer_to_buffer(root,
//                        buffer_send_indices,
//                        indices_buffer);
//                if (indices_buffer.empty()) {
//                    continue;
//                }
//                // Get the labels
//                std::vector<VertexID> labels_buffer;
//                one_host_bcasts_buffer_to_buffer(root,
//                        buffer_send_labels,
//                        labels_buffer);
//
//                VertexID size_indices_buffer = indices_buffer.size();
//                if (size_indices_buffer >= THRESHOLD_PARALLEL) {
//                    // Prepare the offsets for reading indices_buffer
//                    std::vector<EdgeID> starts_locs_index(size_indices_buffer);
//#pragma omp parallel for
//                    for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
//                        const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
//                        starts_locs_index[i_i] = e.second;
//                    }
//                    EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
//
//                    // Prepare the offsets for inserting v_tails into queue
//                    std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
//#pragma omp parallel for
//                    for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
//                        const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
//                        offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
//                    }
//                    EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
//                    std::vector<VertexID> tmp_got_candidates_queue(num_ngbrs);
//                    std::vector<VertexID> sizes_tmp_got_candidates_queue(size_indices_buffer, 0);
//                    std::vector<VertexID> tmp_once_candidated_queue(num_ngbrs);
//                    std::vector<VertexID> sizes_tmp_once_candidated_queue(size_indices_buffer, 0);
//#pragma omp parallel for
//                    for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
//                        VertexID v_head_global = indices_buffer[i_i].first;
//                        EdgeID start_index = starts_locs_index[i_i];
//                        EdgeID bound_index = i_i != size_indices_buffer - 1 ?
//                                starts_locs_index[i_i + 1] : total_recved_labels;
//                        if (G.local_out_degrees[v_head_global]) {
//                            local_push_labels_para(
//                                    v_head_global,
//                                    start_index,
//                                    bound_index,
//                                    roots_start,
//                                    labels_buffer,
//                                    G,
//                                    short_index,
//                                    //                        std::vector<VertexID> &got_candidates_queue,
//                                    //                        VertexID &end_got_candidates_queue,
//                                    tmp_got_candidates_queue,
//                                    sizes_tmp_got_candidates_queue[i_i],
//                                    offsets_tmp_queue[i_i],
//                                    got_candidates,
//                                    //                        std::vector<VertexID> &once_candidated_queue,
//                                    //                        VertexID &end_once_candidated_queue,
//                                    tmp_once_candidated_queue,
//                                    sizes_tmp_once_candidated_queue[i_i],
//                                    once_candidated,
//                                    bp_labels_table,
//                                    used_bp_roots,
//                                    iter);
//                        }
//                    }
//
//                    {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
//                        VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
//                        PADO::collect_into_queue(
//                                tmp_got_candidates_queue,
//                                offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
//                                sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
//                                total_new,
//                                got_candidates_queue,
//                                end_got_candidates_queue);
//                    }
//                    {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
//                        VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
//                        PADO::collect_into_queue(
//                                tmp_once_candidated_queue,
//                                offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue
//                                sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
//                                total_new,
//                                once_candidated_queue,
//                                end_once_candidated_queue);
//                    }
//                } else {
//                    // Sequential Version
//                    // Push those labels
//                    EdgeID start_index = 0;
//                    for (const std::pair<VertexID, VertexID> &e : indices_buffer) {
//                        VertexID v_head_global = e.first;
//                        EdgeID bound_index = start_index + e.second;
//                        if (G.local_out_degrees[v_head_global]) {
//                            local_push_labels_seq(
//                                    v_head_global,
//                                    start_index,
//                                    bound_index,
//                                    roots_start,
//                                    labels_buffer,
//                                    G,
//                                    short_index,
//                                    got_candidates_queue,
//                                    end_got_candidates_queue,
//                                    got_candidates,
//                                    once_candidated_queue,
//                                    end_once_candidated_queue,
//                                    once_candidated,
//                                    bp_labels_table,
//                                    used_bp_roots,
//                                    iter);
//                        }
//                        start_index = bound_index;
//                    }
//                }
//            }
//            scatter_time += WallTimer::get_time_mark();
//        }
//        {//test
//            if (0 == host_id) {
//                printf("iter: %u pushing labels finished.\n", iter);
//            }
//        }
        // Traverse vertices in the got_candidates_queue to insert labels
        {
//            gather_time -= WallTimer::get_time_mark();
            std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
                // pair.first: root id
                // pair.second: label (global) id of the root
//            if (true) {
            if (end_got_candidates_queue >= THRESHOLD_PARALLEL) {
                // Prepare for parallel active_queue
                // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already.
                // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it.
                std::vector<VertexID> offsets_tmp_active_queue(end_got_candidates_queue);
#pragma omp parallel for
                for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                    offsets_tmp_active_queue[i_q] = i_q;
                }
                std::vector<VertexID> tmp_active_queue(end_got_candidates_queue);
                std::vector<VertexID> sizes_tmp_active_queue(end_got_candidates_queue, 0); // Size will only be 0 or 1, but it will become offsets eventually.
                // Prepare for parallel buffer_send
                // Worst-case per-vertex reservation: a root could end up
                // sending every one of its current candidates.
                std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue);
#pragma omp parallel for
                for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                    VertexID v_id_local = got_candidates_queue[i_q];
                    VertexID v_global_id = G.get_global_vertex_id(v_id_local);
                    if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                        // If v_global_id is root, its new labels should be put into buffer_send
                        offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que;
                    } else {
                        offsets_tmp_buffer_send[i_q] = 0;
                    }
                }
                EdgeID total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
//                {// test
//                    if (0 == host_id) {
//                        double memtotal = 0;
//                        double memfree = 0;
//                        double bytes_buffer_send = total_send_labels * sizeof(VertexID);
//                        PADO::Utils::system_memory(memtotal, memfree);
//                        printf("bytes_tmp_buffer_send: %fGB memtotal: %fGB memfree: %fGB\n",
//                                bytes_buffer_send / (1 << 30), memtotal / 1024, memfree / 1024);
//                    }
//                }
                std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send(total_send_labels);
//                {// test
//                    if (0 == host_id) {
//                        printf("tmp_buffer_send created.\n");
//                    }
//                }
                std::vector<EdgeID> sizes_tmp_buffer_send(end_got_candidates_queue, 0);
                // Each thread writes only into its own i_queue slot/slice of
                // the tmp_* arrays, so the loop below needs no locking.
#pragma omp parallel for
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
                                // L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
//                                active_queue[end_active_queue++] = v_id_local;
                                // At most one entry per i_queue slot (guarded
                                // by is_active), so this index is race-free.
                                tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_para(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
                                    tmp_buffer_send,
                                    sizes_tmp_buffer_send[i_queue],
                                    offsets_tmp_buffer_send[i_queue]);
//                                    buffer_send);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
                                // L,
                                // short_index,
                                // b_id,
                                iter);
                    }
                }
                {// Collect elements from tmp_active_queue to active_queue
                    VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
                    PADO::collect_into_queue(
                            tmp_active_queue,
                            offsets_tmp_active_queue,
                            sizes_tmp_active_queue,
                            total_new,
                            active_queue,
                            end_active_queue);
                }
                {// Collect elements from tmp_buffer_send to buffer_send
                    EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
//                    {// test
//                        if (0 == host_id) {
//                            double memtotal = 0;
//                            double memfree = 0;
//                            double bytes_buffer_send = total_new * sizeof(VertexID);
//                            PADO::Utils::system_memory(memtotal, memfree);
//                            printf("bytes_buffer_send: %fGB memtotal: %fGB memfree: %fGB\n",
//                                    bytes_buffer_send / (1 << 30), memtotal / 1024, memfree / 1024);
//                        }
//                    }
                    buffer_send.resize(total_new);
//                    {// test
//                        if (0 == host_id) {
//                            printf("buffer_send created.\n");
//                        }
//                    }
                    // buffer_send was sized exactly; collection starts at 0.
                    EdgeID zero_size = 0;
                    PADO::collect_into_queue(
                            tmp_buffer_send,
                            offsets_tmp_buffer_send,
                            sizes_tmp_buffer_send,
                            total_new,
                            buffer_send,
                            zero_size);
//                    {//test
//                        if (iter == 6) {
//                            for (VertexID i_b = 0; i_b < total_new; ++i_b) {
//                                const auto &e = buffer_send[i_b];
//                                VertexID root_id = e.first;
//                                VertexID cand_real_id = e.second;
//                                if (root_id > 1024) {
//                                    printf("total_new: %lu "
//                                           "buffer_send[%u]: "
//                                           "root_id: %u "
//                                           "cand_real_id: %u\n",
//                                            total_new,
//                                            i_b,
//                                            root_id,
//                                            cand_real_id);
//                                    exit(1);
//                                }
//                            }
//                        }
//                    }
                }
            } else {
                // Sequential version of the same candidate-processing loop.
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
                                // L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
                                active_queue[end_active_queue++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_seq(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
//                                    dist_table,
                                    buffer_send);
//                                    iter);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
                                // L,
                                // short_index,
                                // b_id,
                                iter);
                    }
                }
            }
//            {//test
//                printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//            }
            end_got_candidates_queue = 0; // Set the got_candidates_queue empty
            // Sync the dist_table
            // Every host broadcasts its (root, label) updates in turn; all
            // hosts apply what they receive and remember it for reset_at_end.
            for (int root = 0; root < num_hosts; ++root) {
                std::vector<std::pair<VertexID, VertexID>> buffer_recv;
                one_host_bcasts_buffer_to_buffer(root,
                        buffer_send,
                        buffer_recv);
                if (buffer_recv.empty()) {
                    continue;
                }
                EdgeID size_buffer_recv = buffer_recv.size();
                if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                    // Get label number for every root
                    std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                    for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                        const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                        VertexID root_id = e.first;
                        // Atomic: multiple threads may count for the same root.
                        __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
                    }
                    // Resize the recved_dist_table for every root
                    #pragma omp parallel for
                    for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                        VertexID old_size = recved_dist_table[root_id].size();
                        VertexID tmp_size = sizes_recved_root_labels[root_id];
                        if (tmp_size) {
                            recved_dist_table[root_id].resize(old_size + tmp_size);
                            sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                        }
                        // If tmp_size == 0, root_id has no received labels.
//                            sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                    }
                    // Record received labels in recved_dist_table
#pragma omp parallel for
                    for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                        const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        // Thread-safe append using the per-root write cursor
                        // prepared above (presumably TS_enqueue advances the
                        // cursor atomically — verify in its definition).
                        PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id);
                    }
                } else {
                    for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        // Record the received element, for future reset
                        recved_dist_table[root_id].push_back(cand_real_id);
                    }
                }
            }
            // Sync the global_num_actives
            // NOTE(review): MPI_MAX (not the commented MPI_SUM): the loop
            // condition only needs "any host still active", and the chunked
            // scatter above only needs an upper bound on every host's local
            // queue length, which the max provides — confirm intent.
            MPI_Allreduce(&end_active_queue,
                    &global_num_actives,
                    1,
                    V_ID_Type,
                    MPI_MAX,
//                    MPI_SUM,
                    MPI_COMM_WORLD);
//            gather_time += WallTimer::get_time_mark();
        }
//        {//test
//            if (0 == host_id) {
//                printf("iter: %u inserting labels finished.\n", iter);
//            }
//        }
    }
    // Reset the dist_table
//    clearup_time -= WallTimer::get_time_mark();
    reset_at_end(
//            G,
//            roots_start,
//            roots_master_local,
            dist_table,
            recved_dist_table,
            bp_labels_table);
//    clearup_time += WallTimer::get_time_mark();
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u resetting finished.\n", host_id);
//        }
//    }
}
//// Sequential Version
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//batch_process(
// const DistGraph &G,
// VertexID b_id,
// VertexID roots_start, // start id of roots
// VertexID roots_size, // how many roots in the batch
// const std::vector<uint8_t> &used_bp_roots,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<uint8_t> &got_candidates,
//// std::vector<bool> &got_candidates,
// std::vector<uint8_t> &is_active,
//// std::vector<bool> &is_active,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated)
//// std::vector<bool> &once_candidated)
//{
// // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
// initializing_time -= WallTimer::get_time_mark();
// VertexID global_num_actives = initialization(G,
// short_index,
// dist_table,
// recved_dist_table,
// bp_labels_table,
// active_queue,
// end_active_queue,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// b_id,
// roots_start,
// roots_size,
//// roots_master_local,
// used_bp_roots);
// initializing_time += WallTimer::get_time_mark();
// UnweightedDist iter = 0; // The iterator, also the distance for current iteration
//// {//test
//// printf("host_id: %u initialization finished.\n", host_id);
//// }
//
//
// while (global_num_actives) {
////#ifdef DEBUG_MESSAGES_ON
//// {//
//// if (0 == host_id) {
//// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives);
//// }
//// }
////#endif
// ++iter;
// // Traverse active vertices to push their labels as candidates
// // Send masters' newly added labels to other hosts
// {
// scatter_time -= WallTimer::get_time_mark();
// std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
// //.first: Vertex ID
// //.second: size of labels
// std::vector<VertexID> buffer_send_labels;
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // Prepare the buffer_send_indices
// buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send_labels.push_back(label_root_id);
// }
// }
// end_active_queue = 0;
//
// for (int root = 0; root < num_hosts; ++root) {
// // Get the indices
// std::vector< std::pair<VertexID, VertexID> > indices_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_indices,
// indices_buffer);
// if (indices_buffer.empty()) {
// continue;
// }
// // Get the labels
// std::vector<VertexID> labels_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_labels,
// labels_buffer);
// // Push those labels
// EdgeID start_index = 0;
// for (const std::pair<VertexID, VertexID> e : indices_buffer) {
// VertexID v_head_global = e.first;
// EdgeID bound_index = start_index + e.second;
// if (G.local_out_degrees[v_head_global]) {
// local_push_labels(
// v_head_global,
// start_index,
// bound_index,
// roots_start,
// labels_buffer,
// G,
// short_index,
// got_candidates_queue,
// end_got_candidates_queue,
// got_candidates,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// bp_labels_table,
// used_bp_roots,
// iter);
// }
// start_index = bound_index;
// }
// }
// scatter_time += WallTimer::get_time_mark();
// }
//
// // Traverse vertices in the got_candidates_queue to insert labels
// {
// gather_time -= WallTimer::get_time_mark();
// std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
// // pair.first: root id
// // pair.second: label (global) id of the root
// for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
// VertexID v_id_local = got_candidates_queue[i_queue];
// VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
// got_candidates[v_id_local] = 0; // reset got_candidates
// // Traverse v_id's all candidates
// VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
// for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
// VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
// short_index[v_id_local].is_candidate[cand_root_id] = 0;
// // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
// if ( distance_query(
// cand_root_id,
// v_id_local,
// roots_start,
// // L,
// dist_table,
// iter) ) {
// if (!is_active[v_id_local]) {
// is_active[v_id_local] = 1;
// active_queue[end_active_queue++] = v_id_local;
// }
// ++inserted_count;
// // The candidate cand_root_id needs to be added into v_id's label
// insert_label_only(
// cand_root_id,
// v_id_local,
// roots_start,
// roots_size,
// G,
//// dist_table,
// buffer_send);
//// iter);
// }
// }
// short_index[v_id_local].end_candidates_que = 0;
// if (0 != inserted_count) {
// // Update other arrays in L[v_id] if new labels were inserted in this iteration
// update_label_indices(
// v_id_local,
// inserted_count,
// // L,
// short_index,
// b_id,
// iter);
// }
// }
//// {//test
//// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//// }
// end_got_candidates_queue = 0; // Set the got_candidates_queue empty
// // Sync the dist_table
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<std::pair<VertexID, VertexID>> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
// VertexID root_id = e.first;
// VertexID cand_real_id = e.second;
// dist_table[root_id][cand_real_id] = iter;
// // Record the received element, for future reset
// recved_dist_table[root_id].push_back(cand_real_id);
// }
// }
//
// // Sync the global_num_actives
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// gather_time += WallTimer::get_time_mark();
// }
// }
//
// // Reset the dist_table
// clearup_time -= WallTimer::get_time_mark();
// reset_at_end(
//// G,
//// roots_start,
//// roots_master_local,
// dist_table,
// recved_dist_table,
// bp_labels_table);
// clearup_time += WallTimer::get_time_mark();
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Every host h_i broadcast to others
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<E_T> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
//// uint64_t size_buffer_send = buffer_send.size();
//// // Sync the size_buffer_send.
//// message_time -= WallTimer::get_time_mark();
//// MPI_Bcast(&size_buffer_send,
//// 1,
//// MPI_UINT64_T,
//// root,
//// MPI_COMM_WORLD);
//// message_time += WallTimer::get_time_mark();
////// {// test
////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
////// }
//// if (!size_buffer_send) {
//// continue;
//// }
//// message_time -= WallTimer::get_time_mark();
//// std::vector<E_T> buffer_recv(size_buffer_send);
//// if (host_id == root) {
//// buffer_recv.assign(buffer_send.begin(), buffer_send.end());
//// }
//// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
//// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) {
//// // Only need 1 broadcast
////
//// MPI_Bcast(buffer_recv.data(),
//// bytes_buffer_send,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// } else {
//// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
//// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
//// size_t offset = 0;
//// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
////// size_t offset = b_i * unit_buffer_size;
//// size_t size_unit_buffer = b_i == num_unit_buffers - 1
//// ? size_buffer_send - offset
//// : unit_buffer_size;
//// MPI_Bcast(buffer_recv.data() + offset,
//// size_unit_buffer * ETypeSize,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// offset += unit_buffer_size;
//// }
//// }
//// message_time += WallTimer::get_time_mark();
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
//
// // Every host sends to others
// for (int src = 0; src < num_hosts; ++src) {
// if (host_id == src) {
// // Send from src
// message_time -= WallTimer::get_time_mark();
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, host_id);
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// }
// message_time += WallTimer::get_time_mark();
// } else {
// // Receive from src
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, src);
// if (host_id == dst) {
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
// // Every host sends (num_hosts - 1) times
// for (int hop = 1; hop < num_hosts; ++hop) {
// int src = hop_2_me_host_id(-hop);
// int dst = hop_2_me_host_id(hop);
// if (src != dst) { // Normal case
// // When host_id is odd, first receive, then send.
// if (static_cast<uint32_t>(host_id) & 1U) {
// message_time -= WallTimer::get_time_mark();
// // Receive first.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// // Send then.
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // When host_id is even, first send, then receive.
// // Send first.
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// // Receive then.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// } else { // If host_id is higher than dst, first send, then receive
// // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2.
// if (host_id < dst) {
// // Send
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Receive
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // Otherwise, if host_id is lower than dst, first receive, then send
// // Receive
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Send
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
//}
//// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// const uint32_t UNIT_BUFFER_SIZE = 16U << 20U;
// // Every host h_i broadcast to others
// for (int h_i = 0; h_i < num_hosts; ++h_i) {
// uint64_t size_buffer_send = buffer_send.size();
// // Sync the size_buffer_send.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&size_buffer_send,
// 1,
// MPI_UINT64_T,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
//// {// test
//// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
//// }
// if (!size_buffer_send) {
// continue;
// }
// uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE;
//
// // Broadcast the buffer_send
// for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
// // Prepare the unit buffer
// message_time -= WallTimer::get_time_mark();
// size_t offset = b_i * UNIT_BUFFER_SIZE;
// size_t size_unit_buffer = b_i == num_unit_buffers - 1
// ? size_buffer_send - offset
// : UNIT_BUFFER_SIZE;
// std::vector<E_T> unit_buffer(size_unit_buffer);
// // Copy the messages from buffer_send to unit buffer.
// if (host_id == h_i) {
// unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer);
// }
// // Broadcast the unit buffer
// MPI_Bcast(unit_buffer.data(),
// MPI_Instance::get_sending_size(unit_buffer),
// MPI_CHAR,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// // Process every element of unit_buffer
// for (const E_T &e : unit_buffer) {
// fun(e);
// }
// }
// }
//}
// Function: Host root broadcasts its sending buffer to a receiving buffer.
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
int root,
std::vector<E_T> &buffer_send,
std::vector<E_T> &buffer_recv)
{
// Collective: host `root` broadcasts its buffer_send into buffer_recv on
// every host (root included, via a local copy). All hosts must call this
// with the same `root`. Elements are shipped as raw bytes, so E_T is
// assumed to be trivially copyable -- TODO confirm for all instantiations.
const size_t ETypeSize = sizeof(E_T);
uint64_t size_buffer_send = buffer_send.size();
// Sync the size_buffer_send.
// (On non-root hosts this overwrites the local value with root's count.)
// message_time -= WallTimer::get_time_mark();
MPI_Bcast(&size_buffer_send,
1,
MPI_UINT64_T,
root,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
buffer_recv.resize(size_buffer_send);
if (!size_buffer_send) {
// Nothing to broadcast; buffer_recv is left empty on every host.
return;
}
// Broadcast the buffer_send
// message_time -= WallTimer::get_time_mark();
if (host_id == root) {
// Root copies locally instead of receiving its own broadcast.
buffer_recv.assign(buffer_send.begin(), buffer_send.end());
}
uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
// Only need 1 broadcast
MPI_Bcast(buffer_recv.data(),
bytes_buffer_send,
MPI_CHAR,
root,
MPI_COMM_WORLD);
} else {
// MPI_Bcast's count argument is an int, so payloads larger than
// INT_MAX bytes are split into roughly equal-sized unit buffers.
const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
size_t offset = 0;
for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
// Last unit takes whatever remains; earlier units are full-sized.
size_t size_unit_buffer = b_i == num_unit_buffers - 1
? size_buffer_send - offset
: unit_buffer_size;
MPI_Bcast(buffer_recv.data() + offset,
size_unit_buffer * ETypeSize,
MPI_CHAR,
root,
MPI_COMM_WORLD);
offset += unit_buffer_size;
}
}
// message_time += WallTimer::get_time_mark();
}
}
#endif //PADO_DPADO_H
|
GB_unop__identity_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_uint64)
// op(A') function: GB (_unop_tran__identity_uint16_uint64)
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint16_uint64)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = (uint16_t) Ax [p], for all entries present in A.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if (Ab == NULL)
    {
        // A is not bitmap: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t p = 0 ; p < anz ; p++)
        {
            Cx [p] = (uint16_t) (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // entry present: typecast and copy
                Cx [p] = (uint16_t) (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint16_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// All of the work is done by the shared transpose template, which uses
// the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above in this file.
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__div_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8)
// A*D function (colscale): GB (_AxD__div_uint8)
// D*A function (rowscale): GB (_DxB__div_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8)
// C=scalar+B GB (_bind1st__div_uint8)
// C=scalar+B' GB (_bind1st_tran__div_uint8)
// C=A+scalar GB (_bind2nd__div_uint8)
// C=A'+scalar GB (_bind2nd_tran__div_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (x, y, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C += A+B with all three matrices dense; the template does all the work.
// Note: unlike the other kernels here, this one has no GB_DISABLE guard.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A+B with all three matrices dense; template uses GB_BINOP above.
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C += B where C is dense and B is sparse; B_ek_slicing partitions
// the entries of B into B_ntasks tasks for B_nthreads threads.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b where C is dense and b is a scalar of type uint8_t, passed in
// as an untyped pointer (p_bwork).  Returns GrB_NO_VALUE when this
// operator is disabled via GB_control.h, GrB_SUCCESS otherwise.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// Fix: the generated code had a second, unreachable
// "return (GrB_SUCCESS) ;" inside the block above; only one is kept.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D, scaling each column of A by the corresponding diagonal of D.
// Expose C->x with the concrete type expected by the template below.
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B, scaling each row of B by the corresponding diagonal of D.
// Expose C->x with the concrete type expected by the template below.
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseAdd C=A+B (optionally masked by M or !M), applying GB_BINOP to
// entries present in both A and B. Workspaces declared here are used by
// the template and released by GB_FREE_WORK before returning.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__div_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult method 08: C is sparse/hypersparse; the template applies
// GB_BINOP to the intersection pattern of A and B, under the mask M.
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult method 02: A is sparse/hyper and B is bitmap/full (see the
// header comment above); GB_BINOP_FLIP selects how flipxy is handled.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult method 04: M is sparse/hyper, A and B are bitmap/full; the
// template iterates over M's pattern (hence M_ek_slicing drives tasks).
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult where the result C is held as a bitmap matrix.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = x / Bx [p] for every entry present in B (bind first input).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    const uint8_t x = (*((const uint8_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < bnz ; k++)
    {
        // GBB is true when entry k is present (always true if Bb is NULL)
        if (GBB (Bb, k))
        {
            uint8_t bij = GBX (Bx, k, false) ;
            Cx [k] = GB_IDIV_UNSIGNED (x, bij, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = Ax [p] / y for every entry present in A (bind second input).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    const uint8_t y = (*((const uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // GBB is true when entry k is present (always true if Ab is NULL)
        if (GBB (Ab, k))
        {
            uint8_t aij = GBX (Ax, k, false) ;
            Cx [k] = GB_IDIV_UNSIGNED (aij, y, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \
}
GrB_Info GB (_bind1st_tran__div_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = x / A' : transpose A and divide the bound scalar x by each entry,
// using the GB_CAST_OP redefined just above this function.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Redefine GB_ATYPE for any code that follows (same type in this file).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \
}
GrB_Info GB (_bind2nd_tran__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = A' / y : transpose A and divide each entry by the bound scalar y,
// using the GB_CAST_OP redefined just above this function.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
particle.h | #pragma once
#define SAFTY_FACTOR 1.05
/////////////
/// Force ///
/////////////
// Per-particle gravity result accumulated by the interaction kernel.
class ForceGrav{
public:
PS::F32vec acc;      // accumulated acceleration
PS::F32 phi;         // accumulated potential
PS::S32 neighbor;    // neighbor count
PS::S32 DUMMY_;      // padding member -- presumably for alignment, TODO confirm
#ifdef FOR_PIKG01
PS::S32 id_neighbor; // neighbor id (32-bit layout for the PIKG kernel)
PS::S32 id_neighbor_dmmy; // padding to match the 64-bit layout -- TODO confirm
#else
PS::S64 id_neighbor; // neighbor id
#endif
// Reset accumulators before a new force calculation; -1 means "no neighbor".
void clear(){
acc = 0.;
phi = 0.;
neighbor = 0;
id_neighbor = -1;
}
};
//////////////////////////
/// Essential Particle ///
//////////////////////////
// Essential particle (target side) for the gravity tree walk.
class EPIGrav{
public:
PS::F64vec pos; // position in cartesian
#ifdef USE_POLAR_COORDINATE
PS::F64vec pos_pol; // position in polar
#endif
#ifdef FOR_PIKG01
PS::S32 id;     // id number (32-bit for the PIKG kernel)
PS::S32 id_dmmy; // padding to match the 64-bit layout -- TODO confirm
#else
PS::S64 id; // id number
#endif
#ifdef USE_INDIVIDUAL_CUTOFF
PS::F64 r_out; // cut-off radius
PS::F64 r_search; // search radius
#else
// Shared cut-off/search radii when per-particle cutoffs are disabled
// (defined once outside the class below).
static PS::F64 r_out;
static PS::F64 r_search;
#endif
// Position used by the tree: polar coordinates if enabled, else cartesian.
PS::F64vec getPos() const {
#ifdef USE_POLAR_COORDINATE
return pos_pol;
#else
return pos;
#endif
}
// Cartesian position regardless of the coordinate mode.
PS::F64vec getPosCar() const { return pos; }
// Search radius inflated by SAFTY_FACTOR; in polar mode it is divided by
// the distance from the origin -- presumably converting a cartesian
// length to the polar coordinate scale, TODO confirm.
PS::F64 getRSearch() const {
#ifdef USE_POLAR_COORDINATE
return SAFTY_FACTOR * r_search / sqrt(pos*pos);
#else
return SAFTY_FACTOR * r_search;
#endif
}
// Copy the fields needed by the interaction kernel from a full particle.
void copyFromFP(const EPIGrav & fp){
pos = fp.pos;
#ifdef USE_POLAR_COORDINATE
pos_pol = fp.pos_pol;
#endif
id = fp.id;
#ifdef USE_INDIVIDUAL_CUTOFF
r_out = fp.r_out;
r_search = fp.r_search;
#endif
}
};
#ifndef USE_INDIVIDUAL_CUTOFF
PS::F64 EPIGrav::r_out;
PS::F64 EPIGrav::r_search;
#endif
// Essential particle (source side): adds mass and dynamics to EPIGrav.
class EPJGrav : public EPIGrav {
public:
PS::F64 mass; // mass
PS::F64vec vel; // velocity
PS::F64vec acc_d; // acceleration for hard part
PS::S32 id_local; // local id number
PS::S32 myrank; // rank number
// Charge of the particle as seen by the tree code (its mass for gravity).
PS::F64 getCharge() const { return mass; }
// Copy base-class fields, then the source-side extras.
void copyFromFP(const EPJGrav & fp){
EPIGrav::copyFromFP(fp);
mass = fp.mass;
vel = fp.vel;
acc_d = fp.acc_d;
id_local = fp.id_local;
myrank = fp.myrank;
}
};
/////////////////////
/// Full Particle ///
/////////////////////
#define SECONDORDER 5.e-1
#define THIRDORDER 1.6666666666666667e-1
#define FOURTHORDER 4.1666666666666667e-2
#define FIFTHORDER 8.3333333333333333e-3
#define SIXTHORDER 1.3888888888888889e-3
#define SEVENTHORDER 1.9841269841269841e-4
#define ALPHA 1.1666666666666667e-3
#define ONE_TWELFTH 8.3333333333333333e-2
#define ONE_SIXTIETH 1.6666666666666667e-2
#define ONE_420TH 2.3809523809523810e-3
// 2nd-order timestep criterion: dt = eta * sqrt(|a|^2 / |j|^2), where the
// squared acceleration is floored by alpha2 * acc0^2. Returns the largest
// representable double when the jerk vanishes.
inline PS::F64 calcDt2nd(PS::F64 eta,
                         PS::F64 alpha2,
                         PS::F64 acc0,
                         PS::F64vec acc,
                         PS::F64vec jerk){
    const PS::F64 a2 = acc*acc + alpha2*acc0*acc0;
    const PS::F64 j2 = jerk*jerk;
    if ( !(j2 > 0.) ) return std::numeric_limits<double>::max();
    return eta * sqrt(a2/j2);
}
// 3rd-order timestep criterion: dt = eta * (|a|^2 / |s|^2)^(1/4), where the
// squared acceleration is floored by alpha2 * acc0^2. Returns the largest
// representable double when the snap vanishes.
inline PS::F64 calcDt3rd(PS::F64 eta,
                         PS::F64 alpha2,
                         PS::F64 acc0,
                         PS::F64vec acc,
                         PS::F64vec snap){
    const PS::F64 a2 = acc*acc + alpha2*acc0*acc0;
    const PS::F64 s2 = snap*snap;
    if ( !(s2 > 0.) ) return std::numeric_limits<double>::max();
    return eta * pow(a2/s2, 0.25);
}
// 4th-order (Aarseth-type) timestep criterion:
// dt = eta * sqrt((|a||s| + |j|^2) / (|j||c| + |s|^2)),
// with |a|^2 floored by alpha2 * acc0^2. Returns the largest representable
// double when the jerk vanishes.
inline PS::F64 calcDt4th(PS::F64 eta,
                         PS::F64 alpha2,
                         PS::F64 acc0,
                         PS::F64vec acc,
                         PS::F64vec jerk,
                         PS::F64vec snap,
                         PS::F64vec crac){
    const PS::F64 a_abs = sqrt(acc*acc + alpha2*acc0*acc0);
    const PS::F64 j2    = jerk*jerk;
    const PS::F64 j_abs = sqrt(j2);
    const PS::F64 s2    = snap*snap;
    const PS::F64 s_abs = sqrt(s2);
    const PS::F64 c_abs = sqrt(crac*crac);
    if ( !(j_abs > 0.) ) return std::numeric_limits<double>::max();
    return eta * sqrt((a_abs*s_abs + j2)/(j_abs*c_abs + s2));
}
// 6th-order timestep criterion:
// dt = eta * ((|a||s| + |j|^2) / (|c||a5| + |p|^2))^(1/3),
// with |a|^2 floored by alpha2 * acc0^2. Returns the largest representable
// double when the jerk vanishes. THIRDORDER is 1/3! = 1/6 reused as the
// exponent constant defined above.
inline PS::F64 calcDt6th(PS::F64 eta,
                         PS::F64 alpha2,
                         PS::F64 acc0,
                         PS::F64vec acc,
                         PS::F64vec jerk,
                         PS::F64vec snap,
                         PS::F64vec crac,
                         PS::F64vec pop,
                         PS::F64vec a5){
    const PS::F64 a_abs  = sqrt(acc*acc + alpha2*acc0*acc0);
    const PS::F64 j2     = jerk*jerk;
    const PS::F64 s_abs  = sqrt(snap*snap);
    const PS::F64 c_abs  = sqrt(crac*crac);
    const PS::F64 p2     = pop*pop;
    const PS::F64 a5_abs = sqrt(a5*a5);
    if ( !(j2 > 0.) ) return std::numeric_limits<double>::max();
    return eta * pow((a_abs*s_abs + j2)/(c_abs*a5_abs + p2), THIRDORDER);
}
class FPGrav : public EPJGrav {
public:
PS::F64vec acc; // acceleration for soft part
PS::F64vec acc_s; // acceleration by sun
PS::F64vec jerk_d; // jerk by planet
PS::F64vec jerk_s; // jerk by sun
#ifdef INTEGRATE_6TH_SUN
PS::F64vec acc_;
PS::F64vec snap_s;
#endif
PS::F64vec acc_gd; // acceleration by gas drag
PS::F64 phi; // potential for soft part
PS::F64 phi_d; // potential by planets
PS::F64 phi_s; // potential by sun
static PS::F64 m_sun;
static PS::F64 dens;
static PS::F64 eps2;
static PS::F64 eps2_sun;
static PS::F64 R_cut0;
static PS::F64 R_cut1;
static PS::F64 R_search0;
static PS::F64 R_search1;
#ifdef USE_RE_SEARCH_NEIGHBOR
static PS::F64 R_search2;
static PS::F64 R_search3;
#endif
static PS::F64 gamma;
static PS::F64 g_1_inv; // 1/(g-1)
static PS::F64 g_1_inv7; // 1/(g-1)^7
static PS::F64 w_y; // dW/dy if y<g
static PS::F64 f1; // f(1;g)
#ifdef INDIRECT_TERM
static PS::F64vec acc_indirect;
static PS::F64vec pos_g;
static PS::F64vec vel_g;
static PS::F64 mass_tot;
#endif
#ifdef USE_INDIVIDUAL_CUTOFF
#ifndef CONSTANT_RANDOM_VELOCITY
PS::F64 v_disp;
#else
static PS::F64 v_disp;
#endif
#endif
#ifdef USE_INDIVIDUAL_CUTOFF
PS::F64 r_out_inv;
#else
static PS::F64 r_out_inv;
#endif
PS::F64 time;
PS::F64 dt;
PS::F64 acc0;
static PS::F64 dt_tree;
static PS::F64 dt_min;
static PS::F64 eta;
static PS::F64 eta_0;
static PS::F64 eta_sun;
static PS::F64 eta_sun0;
static PS::F64 alpha2;
PS::F64 r_planet;
PS::F64 f;
static PS::F64 r_cut_min;
static PS::F64 r_cut_max;
static PS::F64 p_cut;
static PS::F64 increase_factor;
PS::S64 id_neighbor;
PS::S64 id_cluster;
PS::S32 n_cluster;
PS::S32 neighbor;
bool inDomain;
bool isSent;
bool isDead;
bool isMerged;
#ifdef MERGE_BINARY
bool isBinary;
static PS::F64 R_merge;
#endif
static void setGamma(PS::F64 g){
gamma = g;
g_1_inv = 1./(g - 1.);
PS::F64 g2 = g*g;
PS::F64 g_1_inv3 = g_1_inv * g_1_inv * g_1_inv;
g_1_inv7 = g_1_inv3 * g_1_inv3 * g_1_inv;
w_y = 7./3. * ((((((g- 9.)*g +45.)*g -60.*log(g))*g -45.)*g +9.)*g -1.) * g_1_inv7;
f1 = (-10./3. + 14.*(g+1.) - 21.*((g+3.)*g+1.)
+ 35./3.*(((g+9.)*g+9.)*g+1.)
- 70.*((g+3.)*g+1.)*g
+ 210.*(g+1.)*g2
+ (((g-7.)*g+21.)*g-35.)*g2*g2 ) * g_1_inv7;
}
PS::F64 getGamma() const{ return gamma; }
PS::F64 getEps2() const{ return eps2;}
PS::F64 getEps2_sun() const{ return eps2_sun;}
PS::F64 getROut() const { return r_out; }
PS::F64 getROut_inv() const { return r_out_inv; }
#ifdef USE_POLAR_COORDINATE
void setPosPolar() {
PS::F64 r = sqrt(pos*pos);
pos_pol.x = atan2(pos.y, pos.x);
pos_pol.y = log(r);
pos_pol.z = asin(pos.z / r);
}
#endif
static PS::F64 getSolarMass() { return m_sun; }
#ifndef WITHOUT_SUN
// Heliocentric orbital elements. With INDIRECT_TERM the two-body mass
// (m_sun + mass) replaces m_sun in the vis-viva relation.
PS::F64 getSemimajorAxis() const {
#ifndef INDIRECT_TERM
return 1.0 / (2.0/sqrt(pos*pos) - vel*vel/m_sun);
#else
return 1.0 / (2.0/sqrt(pos*pos) - vel*vel/(m_sun+mass));
#endif
}
// Robust variant: for eccentric orbits (e >= 0.6) fall back to the
// instantaneous radius instead of the vis-viva semi-major axis.
PS::F64 getSemimajorAxis2() const {
PS::F64 ax;
if ( getEccentricity(ax) < 0.6 ) {
return ax;
} else {
return sqrt(pos*pos);
}
}
// Eccentricity; the semi-major axis is returned through ax as well.
PS::F64 getEccentricity(PS::F64 & ax) const {
PS::F64 r = sqrt(pos*pos);
PS::F64 rv = pos*vel; // r . v
#ifndef INDIRECT_TERM
ax = 1.0 / (2.0/r - vel*vel/m_sun);
PS::F64 ecccosu = 1. - r/ax; // e cos(E)
PS::F64 eccsinu2 = rv*rv/(m_sun*ax); // (e sin(E))^2
#else
ax = 1.0 / (2.0/r - vel*vel/(m_sun+mass));
PS::F64 ecccosu = 1. - r/ax;
PS::F64 eccsinu2 = rv*rv/((m_sun+mass)*ax);
#endif
return sqrt(ecccosu*ecccosu + eccsinu2);
}
PS::F64 getEccentricity() const {
PS::F64 ax;
return getEccentricity(ax);
}
// Inclination from the specific angular momentum h = pos x vel.
PS::F64 getInclination(PS::F64vec & h) const {
h.x = pos.y*vel.z - pos.z*vel.y;
h.y = pos.z*vel.x - pos.x*vel.z;
h.z = pos.x*vel.y - pos.y*vel.x;
return atan2(sqrt(h.x*h.x + h.y*h.y), h.z);
}
PS::F64 getInclination() const {
PS::F64vec h;
return getInclination(h);
}
// Hill radius r_H = a (m / 3 M_sun)^(1/3).
PS::F64 getRHill() const {
PS::F64 ax = getSemimajorAxis2();
return pow(mass/(3.*m_sun), 1./3.) * ax;
}
// Circular Kepler speed at the particle's cylindrical radius.
PS::F64 getKeplerVelocity() const {
PS::F64 r = sqrt(pos.x * pos.x + pos.y * pos.y);
return sqrt(m_sun/r);
}
#endif // WITHOUT_SUN
#ifdef INTEGRATE_6TH_SUN
void setAcc_() { acc_ = acc_s + acc_d; } // cache total acceleration
#endif
#ifdef USE_INDIVIDUAL_CUTOFF
// Set this particle's individual cutoff (r_out) and neighbor-search
// (r_search) radii from its Hill radius and local velocity dispersion;
// returns the Hill radius for use by the caller.
PS::F64 setROutRSearch(){
#ifndef WITHOUT_SUN
PS::F64 rHill = getRHill();
PS::F64 ax = getSemimajorAxis2();
// larger of the (a-scaled) Hill criterion and the drift criterion
PS::F64 r_out_i = std::max(R_cut0*pow(ax,-p_cut)*rHill, R_cut1*v_disp*dt_tree);
#else
PS::F64 rHill = 0.;
PS::F64 r_out_i = std::max(R_cut0*pow(mass * dt_tree * dt_tree, 1./3.), R_cut1*v_disp*dt_tree);
#endif
r_out = std::max(r_out_i, r_cut_min);
if ( r_cut_max > 0. ) r_out = std::min(r_cut_max, r_out); // optional hard cap
r_out_inv = 1. / r_out;
r_search = R_search0*r_out + R_search1*v_disp*dt_tree;
assert ( r_out > 0. && r_search > 0. && r_search > r_out );
return rHill;
}
#else //USE_INDIVIDUAL_CUTOFF
// Shared-cutoff variant: one r_out/r_search pair for every particle,
// derived from the global maxima of Hill radius and dispersion.
static void setROutRSearch(PS::F64 rHill_a_glb,
PS::F64 v_disp_glb){
#ifndef WITHOUT_SUN
PS::F64 r_out_i = std::max(R_cut0*rHill_a_glb, R_cut1*v_disp_glb*dt_tree);
#else
PS::F64 r_out_i = std::max(R_cut0*pow(rHill_a_glb * dt_tree * dt_tree, 1./3.), R_cut1*v_disp_glb*dt_tree);
#endif
r_out = std::max(r_out_i, r_cut_min);
if ( r_cut_max > 0. ) r_out = std::min(r_cut_max, r_out);
r_out_inv = 1. / r_out;
r_search = R_search0*r_out + R_search1*v_disp_glb*dt_tree;
assert ( r_out > 0. && r_search > 0. );
}
#endif //USE_INDIVIDUAL_CUTOFF
// Physical radius from mass and material density: (3m / 4 pi rho)^(1/3).
void setRPlanet() {
r_planet = pow(0.75*mass/(MY_PI*dens), 1./3.);
}
// Copy the tree-force result for this particle back onto the full particle.
void copyFromForce(const ForceGrav & force){
acc = force.acc;
phi = force.phi;
neighbor = force.neighbor;
id_neighbor = force.id_neighbor;
}
// Field-by-field copy of all FPGrav state (used by binary I/O).
// EPJGrav::copyFromFP handles the base-class members (id, mass, pos,
// vel, r_out, r_search, ...); everything else is copied explicitly here.
void copy(const FPGrav & fp){
EPJGrav::copyFromFP(fp);
acc = fp.acc;
acc_s = fp.acc_s;
jerk_d = fp.jerk_d;
jerk_s = fp.jerk_s;
#ifdef INTEGRATE_6TH_SUN
acc_ = fp.acc_;
snap_s = fp.snap_s;
#endif
acc_gd = fp.acc_gd;
phi = fp.phi;
phi_d = fp.phi_d;
phi_s = fp.phi_s;
#ifdef USE_INDIVIDUAL_CUTOFF
#ifndef CONSTANT_RANDOM_VELOCITY
v_disp = fp.v_disp; // static when CONSTANT_RANDOM_VELOCITY; no copy needed
#endif
#endif
#ifdef USE_INDIVIDUAL_CUTOFF
r_out_inv = fp.r_out_inv;
#endif
time = fp.time;
dt = fp.dt;
acc0 = fp.acc0;
r_planet = fp.r_planet;
f = fp.f;
id_neighbor = fp.id_neighbor;
id_cluster = fp.id_cluster;
n_cluster = fp.n_cluster;
neighbor = fp.neighbor;
inDomain = fp.inDomain;
isSent = fp.isSent;
isDead = fp.isDead;
isMerged = fp.isMerged;
#ifdef MERGE_BINARY
isBinary = fp.isBinary;
#endif
}
// Debug dump of the basic dynamical state to the given stream.
void dump(std::ostream & fout = std::cout) const {
fout<<"id= "<<id<<std::endl;
fout<<"mass= "<<mass<<std::endl;
fout<<"pos= "<<pos<<std::endl;
#ifdef USE_POLAR_COORDINATE
fout<<"pos_pol="<<pos_pol<<std::endl;
#endif
fout<<"vel= "<<vel<<std::endl;
fout<<"acc="<<acc<<std::endl;
fout<<"phi="<<phi<<std::endl;
}
// Read one particle from a tab-separated ASCII snapshot line:
// id, mass, r_planet, f, pos.xyz, vel.xyz, neighbor, Flag.
// NOTE(review): "%d" assumes id and neighbor are int-sized here — confirm
// this matches their PS::S32/PS::S64 declarations.
void readAscii(FILE* fp) {
PS::S32 Flag;
// fscanf returns the number of fields assigned, or EOF on failure.
// The previous "!fscanf(...)" check treated EOF (-1, truthy) and
// partially-parsed lines (1..11 fields) as success; require all 12
// conversions explicitly.
if ( fscanf(fp, "%d\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%d\t%d\n",
&this->id, &this->mass, &this->r_planet, &this->f,
&this->pos.x, &this->pos.y, &this->pos.z,
&this->vel.x, &this->vel.y, &this->vel.z,
&this->neighbor, &Flag) != 12 ) {
//&this->r_out, &this->r_search) ) {
errorMessage("The particle data have NOT been correctly read.");
PS::Abort();
}
#ifdef MERGE_BINARY
isBinary = (bool)(Flag & (1<<0)); // bit 0 carries the binary-member flag
#endif
}
// Write one particle in the same tab-separated layout readAscii expects.
void writeAscii(FILE* fp) const {
PS::S32 Flag = 0;
#ifdef MERGE_BINARY
Flag |= ((PS::S32)isBinary)<<0;
#endif
// fprintf returns the character count, negative on error; the previous
// "!fprintf(...)" only caught an impossible zero-length write.
if ( fprintf(fp, "%d\t%20.15e\t%20.15e\t%20.15e\t%20.15e\t%20.15e\t%20.15e\t%20.15e\t%20.15e\t%20.15e\t%d\t%d\n",
this->id, this->mass, this->r_planet, this->f,
this->pos.x, this->pos.y, this->pos.z,
this->vel.x, this->vel.y, this->vel.z,
this->neighbor, Flag) < 0 ) {
//this->r_out, this->r_search) ){
errorMessage("The particle data have NOT been correctly written.");
PS::Abort();
}
}
// Binary snapshot I/O: one fixed-size FPGrav record per particle.
// fread/fwrite with nmemb = 1 return 0 on failure and 1 on success,
// so the "!" checks here are correct (unlike the ASCII variants).
void readBinary(FILE* fp) {
FPGrav buf;
if ( !fread(&buf, sizeof(buf), 1, fp) ) {
errorMessage("The particle data have NOT been correctly read.");
PS::Abort();
}
copy(buf);
}
void writeBinary(FILE* fp) const {
FPGrav buf;
buf.copy(*this); // slice off FPHard extras by staging through FPGrav
if ( !fwrite(&buf, sizeof(buf), 1, fp) ) {
errorMessage("The particle data have NOT been correctly written.");
PS::Abort();
}
}
// Half-kick of the soft (tree) step: v += (dt_tree/2) * a.
void velKick(){
#ifdef INDIRECT_TERM
vel += 0.5*dt_tree*(acc + acc_indirect);
#else
vel += 0.5*dt_tree*acc;
#endif
}
// Choose the initial individual time step: a power-of-two fraction of
// dt_tree that divides the current time, bounded by the accuracy
// criterion dt_1 and floored at dt_min.
void calcDeltatInitial(){
PS::F64 dt_next = 0.5*dt_tree;
#ifndef INTEGRATE_6TH_SUN
PS::F64 dt_1 = std::min(calcDt2nd(eta_0, alpha2, acc0, acc_d, jerk_d),
calcDt2nd(eta_sun0, alpha2, 0., acc_s, jerk_s));
//PS::F64 dt_1 = calcDt2nd(eta_0, alpha2, acc0, acc_d, jerk_d);
#else
#ifdef AARSETH
PS::F64 dt_1 = std::min(calcDt2nd(eta_0, alpha2, acc0, acc_d, jerk_d),
calcDt2nd(eta_sun0, alpha2, 0., acc_s, jerk_s));
#else
PS::F64 dt_1 = std::min(calcDt2nd(eta_0, alpha2, acc0, acc_d, jerk_d),
calcDt3rd(eta_sun0, alpha2, 0., acc_s, snap_s));
#endif
#endif
// shrink until dt_next divides time exactly (block-step alignment)
PS::F64 rem = fmod(time, dt_next);
while( rem != 0.0 ){
dt_next *= 0.5;
rem = fmod(time, dt_next);
}
if ( dt > 0. )
while( 2.*dt < dt_next ) dt_next *= 0.5; // grow by at most a factor 2
while( dt_1 < dt_next ) dt_next *= 0.5; // satisfy the accuracy bound
if( dt_next < 2.*dt_min ) dt_next = dt_min; // enforce the floor
dt = dt_next;
}
};
// Out-of-class definitions (with defaults) for FPGrav's static members.
PS::F64 FPGrav::m_sun = 1.; // solar mass in simulation units
PS::F64 FPGrav::dens = 5.049667e6; // material density in simulation units
PS::F64 FPGrav::eps2 = 0.; // planet-planet softening^2
PS::F64 FPGrav::eps2_sun = 0.; // planet-Sun softening^2
PS::F64 FPGrav::R_cut0 = 2.;
PS::F64 FPGrav::R_cut1 = 8.;
PS::F64 FPGrav::R_search0 = 1.;
PS::F64 FPGrav::R_search1 = 4.;
#ifdef USE_RE_SEARCH_NEIGHBOR
PS::F64 FPGrav::R_search2 = 1.;
PS::F64 FPGrav::R_search3 = 4.;
#endif
PS::F64 FPGrav::gamma = 0.5;
PS::F64 FPGrav::g_1_inv = -2.; // 1/(gamma-1)
PS::F64 FPGrav::g_1_inv7 = -128.; // 1/(gamma-1)^7
PS::F64 FPGrav::w_y; // dW/dy when y<g
PS::F64 FPGrav::f1; // f(1;g)
#ifdef INDIRECT_TERM
PS::F64vec FPGrav::acc_indirect = 0.;
PS::F64vec FPGrav::pos_g = 0.;
PS::F64vec FPGrav::vel_g = 0.;
PS::F64 FPGrav::mass_tot = 0.;
#endif
#ifdef CONSTANT_RANDOM_VELOCITY
PS::F64 FPGrav::v_disp = 0.;
#endif
#ifndef USE_INDIVIDUAL_CUTOFF
PS::F64 FPGrav::r_out_inv;
#endif
PS::F64 FPGrav::dt_tree = pow2(-5); // shared tree step
PS::F64 FPGrav::dt_min = pow2(-13); // floor for individual steps
PS::F64 FPGrav::eta = 0.01;
PS::F64 FPGrav::eta_0 = 0.001;
PS::F64 FPGrav::eta_sun = 0.01;
PS::F64 FPGrav::eta_sun0 = 0.001;
PS::F64 FPGrav::alpha2 = 1.;
PS::F64 FPGrav::r_cut_min = 0.;
PS::F64 FPGrav::r_cut_max = 0.; // <= 0 means "no upper cap"
PS::F64 FPGrav::p_cut = 0.;
PS::F64 FPGrav::increase_factor = 1.;
#ifdef MERGE_BINARY
PS::F64 FPGrav::R_merge = 0.2;
#endif
// Hard-part particle: FPGrav plus the work space for the Hermite
// predictor/corrector (4th order; 6th order for the solar part when
// INTEGRATE_6TH_SUN is set) and the neighbor lists used to build clusters.
class FPHard : public FPGrav {
public:
PS::F64vec x0; // position at step start
PS::F64vec v0; // velocity at step start
PS::F64vec a0_s; // solar acceleration at step start
PS::F64vec j0_s; // solar jerk at step start
PS::F64vec a0_d; // direct (planet-planet) acceleration at step start
PS::F64vec j0_d; // direct jerk at step start
PS::F64vec xp; // predicted position
PS::F64vec vp; // predicted velocity
#ifdef INTEGRATE_6TH_SUN
PS::F64vec s0_s; // solar snap at step start
PS::F64vec ap; // predicted acceleration
#endif
#ifndef INTEGRATE_6TH_SUN
PS::F64vec a2_s; // fitted 2nd derivative, solar part
PS::F64vec a3_s; // fitted 3rd derivative, solar part
#else
PS::F64vec a3_s; // fitted 3rd..5th derivatives, solar part
PS::F64vec a4_s;
PS::F64vec a5_s;
#endif
PS::F64vec a2_d; // fitted 2nd derivative, direct part
PS::F64vec a3_d; // fitted 3rd derivative, direct part
PS::F64 time_c; // cluster time origin; absolute time = time_c + time
std::vector<PS::S64> n_list; // neighbor ids (global)
std::vector<PS::S32> n_hard_list; // neighbor indices within the cluster
void clearList(){
//std::vector<PS::S32> tmp0, tmp1;
//tmp0.swap(n_list);
//tmp1.swap(n_hard_list);
n_list.clear();
n_hard_list.clear();
}
void copyList(std::vector<PS::S64> list){
n_list.resize(list.size());
std::copy(list.begin(),list.end(),n_list.begin());
}
void copyList(PS::S64 * list){
n_list.clear();
n_list.reserve(neighbor);
for ( PS::S32 i=0; i<neighbor; i++ ) n_list.push_back(list[i]);
}
void copyHardList(std::vector<PS::S32> list){
n_hard_list.resize(list.size());
std::copy(list.begin(),list.end(),n_hard_list.begin());
}
void copyHardList(PS::S32 * list){
n_hard_list.clear();
n_hard_list.reserve(neighbor);
for ( PS::S32 i=0; i<neighbor; i++ ) n_hard_list.push_back(list[i]);
}
// Translate global neighbor ids into in-cluster indices via id_map.
void makeHardList(std::map<PS::S64,PS::S32> & id_map){
n_hard_list.clear();
n_hard_list.reserve(neighbor);
for ( PS::S32 i=0; i<neighbor; i++){
n_hard_list.push_back(id_map.at(n_list.at(i)));
}
}
// Same, but skip neighbors with zero mass (dead/merged particles) and
// shrink the neighbor count to match.
template <class Tpsys>
void makeHardList(std::map<PS::S64,PS::S32> & id_map,
Tpsys & pp){
n_hard_list.clear();
for ( PS::S32 i=0; i<neighbor; i++){
PS::S32 id_hard = id_map.at(n_list.at(i));
if ( pp[id_hard].mass > 0. ) n_hard_list.push_back(id_hard);
}
neighbor = n_hard_list.size();
}
FPHard(){
x0 = v0 = 0.;
a0_s = j0_s = 0.;
a0_d = j0_d = 0.;
xp = vp = 0.;
#ifdef INTEGRATE_6TH_SUN
s0_s = 0.;
ap = 0.;
#endif
#ifndef INTEGRATE_6TH_SUN
a2_s = a3_s = 0.;
#else
a3_s = a4_s = a5_s = 0.;
#endif
a2_d = a3_d = 0.;
time_c = 0.;
r_planet = f = 0.;
clearList();
}
FPHard(const FPHard & fp) : FPGrav(fp){
x0 = fp.x0;
v0 = fp.v0;
a0_s = fp.a0_s;
j0_s = fp.j0_s;
a0_d = fp.a0_d;
j0_d = fp.j0_d;
xp = fp.xp;
vp = fp.vp;
#ifdef INTEGRATE_6TH_SUN
s0_s = fp.s0_s;
ap = fp.ap;
#endif
#ifndef INTEGRATE_6TH_SUN
a2_s = fp.a2_s;
a3_s = fp.a3_s;
#else
a3_s = fp.a3_s;
a4_s = fp.a4_s;
a5_s = fp.a5_s;
#endif
a2_d = fp.a2_d;
a3_d = fp.a3_d;
time_c = fp.time_c;
copyList(fp.n_list);
copyHardList(fp.n_hard_list);
}
// Promote a soft particle: shift the time origin into time_c and start
// the cluster-local clock at zero.
FPHard(const FPGrav & fp) : FPGrav(fp){
x0 = v0 = 0.;
a0_s = j0_s = 0.;
a0_d = j0_d = 0.;
xp = vp = 0.;
#ifdef INTEGRATE_6TH_SUN
s0_s = 0;
ap = 0;
#endif
#ifndef INTEGRATE_6TH_SUN
a2_s = a3_s = 0.;
#else
a3_s = a4_s = a5_s = 0.;
#endif
a2_d = a3_d = 0.;
time_c = fp.time;
time = 0;
clearList();
}
FPHard &operator=(const FPHard & fp){
FPGrav::operator=(fp);
if ( this != &fp ){
x0 = fp.x0;
v0 = fp.v0;
a0_s = fp.a0_s;
j0_s = fp.j0_s;
a0_d = fp.a0_d;
j0_d = fp.j0_d;
xp = fp.xp;
vp = fp.vp;
#ifdef INTEGRATE_6TH_SUN
s0_s = fp.s0_s;
ap = fp.ap;
#endif
#ifndef INTEGRATE_6TH_SUN
a2_s = fp.a2_s;
a3_s = fp.a3_s;
#else
a3_s = fp.a3_s;
a4_s = fp.a4_s;
a5_s = fp.a5_s;
#endif
a2_d = fp.a2_d;
a3_d = fp.a3_d;
time_c = fp.time_c;
copyList(fp.n_list);
copyHardList(fp.n_hard_list);
}
return *this;
}
FPHard &operator=(const FPGrav & fp){
FPGrav::operator=(fp);
if ( this != &fp ){
x0 = v0 = 0.;
a0_s = j0_s = 0.;
a0_d = j0_d = 0.;
xp = vp = 0.;
#ifdef INTEGRATE_6TH_SUN
s0_s = 0;
ap = 0;
#endif
#ifndef INTEGRATE_6TH_SUN
a2_s = a3_s = 0.;
#else
a3_s = a4_s = a5_s = 0.;
#endif
a2_d = a3_d = 0.;
time_c = fp.time;
time = 0;
clearList();
}
return *this;
}
// Fold the cluster-local clock back into the absolute time.
void resetTime() {
time = time_c + time;
time_c = 0.;
}
PS::F64 getTime() const { return time_c + time; }
// Hermite predictor: save the step-start state and extrapolate
// position/velocity (and acceleration in the 6th-order scheme) by Dt.
void predict(PS::F64 Dt){
x0 = pos;
v0 = vel;
a0_s = acc_s;
j0_s = jerk_s;
#ifdef INTEGRATE_6TH_SUN
s0_s = snap_s;
#endif
j0_d = jerk_d;
a0_d = acc_d;
#ifndef INTEGRATE_6TH_SUN
PS::F64vec acc_sd = acc_s + acc_d;
PS::F64vec jerk_sd = jerk_s + jerk_d;
xp = pos + Dt*(vel + Dt*(SECONDORDER*acc_sd + THIRDORDER*Dt*jerk_sd));
vp = vel + Dt*(acc_sd + Dt* SECONDORDER*jerk_sd);
#else
assert ( acc_ == acc_s + acc_d );
PS::F64vec jerk_sd = jerk_s + jerk_d;
PS::F64vec snap_sd = snap_s;
xp = pos + Dt*(vel + Dt*(SECONDORDER*acc_ + Dt*(THIRDORDER*jerk_sd + Dt*FOURTHORDER*snap_sd)));
vp = vel + Dt*(acc_ + Dt*(SECONDORDER*jerk_sd + Dt* THIRDORDER*snap_sd));
ap = acc_ + Dt*(jerk_sd + Dt* SECONDORDER*snap_sd);
#endif
}
// Hermite corrector: fit higher derivatives from the accelerations and
// jerks at both step ends, then correct the predicted pos/vel.
void correct(PS::F64 Dt){
PS::F64 Dt2 = Dt*Dt;
PS::F64 Dt_inv = 1./Dt;
PS::F64 Dt_inv2 = Dt_inv *Dt_inv;
PS::F64 Dt_inv3 = Dt_inv2*Dt_inv;
#ifndef WITHOUT_SUN
PS::F64vec Am_s = acc_s - a0_s;
PS::F64vec J0_s = Dt*j0_s;
PS::F64vec J1_s = Dt*jerk_s;
#ifndef INTEGRATE_6TH_SUN
PS::F64vec A2_s = 3.*Am_s - (J1_s + 2.*J0_s);
PS::F64vec A3_s = -2.*Am_s + (J1_s + J0_s);
a2_s = 2.*Dt_inv2*(A2_s + 3.*A3_s);
a3_s = 6.*Dt_inv3* A3_s;
#else
PS::F64vec S0_s = SECONDORDER*Dt2*s0_s;
PS::F64vec S1_s = SECONDORDER*Dt2*snap_s;
PS::F64vec A3_s = 10.*Am_s - (4.*J1_s + 6.*J0_s) + ( S1_s - 3.*S0_s);
PS::F64vec A4_s = -15.*Am_s + (7.*J1_s + 8.*J0_s) - (2.*S1_s - 3.*S0_s);
PS::F64vec A5_s = 6.*Am_s - 3.*(J1_s + J0_s) + ( S1_s - S0_s);
PS::F64 Dt_inv4 = Dt_inv2*Dt_inv2;
PS::F64 Dt_inv5 = Dt_inv3*Dt_inv2;
a3_s = 3.*Dt_inv3*(A3_s + 4.*A4_s + 10.*A5_s);
a4_s = 24.*Dt_inv4*(A4_s + 5.*A5_s);
a5_s = 120.*Dt_inv5*A5_s;
#endif
#else //WITHOUT_SUN
#ifndef INTEGRATE_6TH_SUN
PS::F64vec A2_s = 0.;
PS::F64vec A3_s = 0.;
a2_s = a3_s = 0.;
#else
PS::F64vec A3_s = 0.;
PS::F64vec A4_s = 0.;
PS::F64vec A5_s = 0.;
a3_s = a4_s = a5_s = 0.;
#endif
#endif //WITHOUT_SUN
PS::F64vec Am_d = acc_d - a0_d;
PS::F64vec J0_d = j0_d * Dt;
PS::F64vec J1_d = jerk_d * Dt;
PS::F64vec A2_d = 3.*Am_d - (J1_d + 2.*J0_d);
PS::F64vec A3_d = -2.*Am_d + (J1_d + J0_d);
a2_d = 2. * (A2_d + 3.*A3_d) * Dt_inv2;
a3_d = 6. * A3_d * Dt_inv3;
#ifndef INTEGRATE_6TH_SUN
PS::F64vec A2_sd = A2_s+A2_d;
PS::F64vec A3_sd = A3_s+A3_d;
pos = xp + ONE_SIXTIETH*Dt2*(5.*A2_sd + 3.*ALPHA*A3_sd);
vel = vp + ONE_TWELFTH *Dt *(4.*A2_sd + 3. *A3_sd);
#else
PS::F64vec A2_sd = A2_d;
PS::F64vec A3_sd = A3_s+A3_d;
PS::F64vec A4_sd = A4_s;
PS::F64vec A5_sd = A5_s;
pos = xp + ONE_420TH *Dt2*(35.*A2_sd + 21.*A3_sd + 14.*A4_sd + 10.*A5_sd);
vel = vp + ONE_SIXTIETH*Dt *(20.*A2_sd + 15.*A3_sd + 12.*A4_sd + 10.*A5_sd);
assert( acc_ == acc_s + acc_d );
#endif
}
// Next individual step: at most doubled, aligned to the block hierarchy,
// bounded by the Aarseth-type criterion, floored at dt_min.
void calcDeltat(){
PS::F64 dt_next = std::min(2.*dt, 0.5*dt_tree);
#ifndef INTEGRATE_6TH_SUN
#ifndef WITHOUT_SUN
PS::F64 dt_1 = std::min(calcDt4th(eta, alpha2, acc0, acc_d, jerk_d, a2_d, a3_d),
calcDt4th(eta_sun, alpha2, 0., acc_s, jerk_s, a2_s, a3_s));
//PS::F64 dt_1 = calcDt4th(eta, alpha2, acc0, acc_d, jerk_d, a2_d+a3_d*dt, a3_d);
#else
PS::F64 dt_1 = calcDt4th(eta, alpha2, acc0, acc_d, jerk_d, a2_d, a3_d);
#endif
#else
#ifdef AARSETH
#ifndef WITHOUT_SUN
PS::F64 dt_1 = std::min(calcDt4th(eta, alpha2, acc0, acc_d, jerk_d, a2_d, a3_d),
calcDt4th(eta_sun, alpha2, 0., acc_s, jerk_s, snap_s, a3_s));
#else
PS::F64 dt_1 = calcDt4th(eta, alpha2, acc0, acc_d, jerk_d, a2_d, a3_d);
#endif
#else
#ifndef WITHOUT_SUN
PS::F64 dt_1 = std::min(calcDt4th(eta, alpha2, acc0, acc_d, jerk_d, a2_d, a3_d),
calcDt6th(eta_sun, alpha2, 0., acc_s, jerk_s, snap_s, a3_s, a4_s, a5_s));
#else
PS::F64 dt_1 = calcDt4th(eta, alpha2, acc0, acc_d, jerk_d, a2_d, a3_d);
#endif
#endif
#endif
PS::F64 rem = fmod(time, dt_next);
while(rem != 0.0){
dt_next *= 0.5;
rem = fmod(time, dt_next);
}
while(dt_1 < dt_next && 0.5*dt < dt_next) dt_next *= 0.5;
if( dt_next < 2.*dt_min ) dt_next = dt_min;
dt = dt_next;
}
};
// Compute the random (non-Keplerian) velocity dispersion in N radial bins
// and assign each particle a Gaussian-weighted interpolation of the binned
// values into pp[i].v_disp.
// ISOTROPIC: dispersion of the full velocity about the bin mean;
// otherwise: rms of the residual after subtracting the local circular
// Kepler velocity.
template <class Tpsys>
void calcRandomVel(Tpsys & pp,
const PS::S32 n_tot,
const PS::S32 n_loc)
{
// r_min is reduced as max(-r2) so a single "max" reduction handles both
// bounds; it is negated back after the loop.
PS::F64 r_max=0., r_min=-std::numeric_limits<PS::F64>::max();
#pragma omp parallel for reduction (max: r_max, r_min)
for(PS::S32 i=0; i<n_loc; i++){
PS::F64vec pos = pp[i].pos;
PS::F64 r2 = pos.x*pos.x + pos.y*pos.y;
r_max = std::max(r_max, r2);
r_min = std::max(r_min, -r2);
}
r_max = sqrt(r_max) * 1.01; // pad the range so every particle bins inside
r_min = sqrt(-r_min) * 0.99;
r_max = PS::Comm::getMaxValue(r_max);
r_min = PS::Comm::getMinValue(r_min);
const PS::S32 N = 32; // number of radial bins
const PS::F64 dr = (r_max - r_min) /N;
const PS::F64 drinv = 1./dr;
#ifdef ISOTROPIC
PS::F64vec v_ave_loc[N];
PS::F64vec v_ave_glb[N];
PS::F64 v_sq_loc[N];
PS::F64 v_sq_glb[N];
#else
PS::F64 v_disp_loc[N];
#endif
PS::F64 v_disp_glb[N];
PS::S32 n_ptcl_loc[N];
PS::S32 n_ptcl_glb[N];
#ifdef ISOTROPIC
for(PS::S32 i=0; i<N; i++) {
v_ave_loc[i] = 0.;
v_sq_loc[i] = 0.;
n_ptcl_loc[i] = 0;
}
//#pragma omp parallel for reduction (+:v_ave_loc[:N], v_sq_loc[:N], n_ptcl_loc[:N])
for(PS::S32 i=0; i<n_loc; i++){
PS::F64vec pos = pp[i].pos;
PS::F64 r2 = pos.x*pos.x + pos.y*pos.y;
PS::S32 j = (PS::S32)((sqrt(r2) - r_min) * drinv);
if ( j == N ) j = N - 1; // clamp the padded upper edge
assert ( 0<= j && j < N );
PS::F64vec vec = pp[i].vel;
v_ave_loc[j] += vec;
v_sq_loc[j] += vec*vec;
n_ptcl_loc[j] ++;
}
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
MPI_Allreduce(v_ave_loc, v_ave_glb, N, PS::GetDataType(*v_ave_loc), MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(v_sq_loc, v_sq_glb, N, PS::GetDataType(*v_sq_loc), MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(n_ptcl_loc, n_ptcl_glb, N, PS::GetDataType(*n_ptcl_loc), MPI_SUM, MPI_COMM_WORLD);
#else
for(PS::S32 i=0; i<N; i++) {
v_ave_glb[i] = v_ave_loc[i];
v_sq_glb[i] = v_sq_loc[i];
n_ptcl_glb[i] = n_ptcl_loc[i];
}
#endif
PS::S32 n_tot0 = 0;
// per-bin dispersion: sqrt(<v^2> - <v>^2); needs at least 2 particles
for(PS::S32 i=0; i<N; i++) {
v_disp_glb[i] = (n_ptcl_glb[i] > 1) ?
sqrt(v_sq_glb[i] / n_ptcl_glb[i]
- v_ave_glb[i]*v_ave_glb[i]/( n_ptcl_glb[i]* n_ptcl_glb[i]) ) : 0.;
n_tot0 += n_ptcl_glb[i];
}
assert ( n_tot == n_tot0 );
#else //ISOTROPIC
for(PS::S32 i=0; i<N; i++) {
v_disp_loc[i] = 0.;
n_ptcl_loc[i] = 0;
}
#pragma omp parallel for reduction (+:v_disp_loc[:N], n_ptcl_loc[:N])
for(PS::S32 i=0; i<n_loc; i++){
PS::F64vec pos = pp[i].pos;
PS::F64 r2 = pos.x*pos.x + pos.y*pos.y;
PS::F64 ri = sqrt(r2);
PS::S32 j = (PS::S32)((ri - r_min) * drinv);
if ( j == N ) j = N - 1;
assert ( 0<= j && j < N );
#if 1
// residual after subtracting the local circular Kepler velocity
PS::F64vec v_kep;
v_kep.x=-pos.y/ri ; v_kep.y=pos.x/ri; v_kep.z=0.;
v_kep *= pp[i].getKeplerVelocity();
PS::F64vec v_ran = pp[i].vel - v_kep;
#else
// alternative estimate from orbital elements: v_ran^2 ~ (e^2+i^2) v_kep^2
PS::F64 ax;
PS::F64 ecc = pp[i].getEccentricity(ax);
PS::F64vec h;
PS::F64 inc = pp[i].getInclination(h);
PS::F64vec v_kep = pp[i].getKeplerVelocity();
PS::F64 v_ran = (ecc*ecc + inc*inc) * v_kep*v_kep;
#endif
v_disp_loc[j] += v_ran * v_ran;
n_ptcl_loc[j] ++;
}
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
MPI_Allreduce(v_disp_loc, v_disp_glb, N, PS::GetDataType(*v_disp_loc), MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(n_ptcl_loc, n_ptcl_glb, N, PS::GetDataType(*n_ptcl_loc), MPI_SUM, MPI_COMM_WORLD);
#else
for(PS::S32 i=0; i<N; i++) {
v_disp_glb[i] = v_disp_loc[i];
n_ptcl_glb[i] = n_ptcl_loc[i];
}
#endif
PS::S32 n_tot0 = 0;
for(PS::S32 i=0; i<N; i++) {
v_disp_glb[i] = (n_ptcl_glb[i] > 0) ? sqrt(v_disp_glb[i] / n_ptcl_glb[i]) : 0.;
n_tot0 += n_ptcl_glb[i];
}
assert ( n_tot == n_tot0 );
#endif //ISOTROPIC
// Interpolate the binned dispersions onto each particle with Gaussian
// weights centered at the bin midpoints; empty bins get zero weight.
#pragma omp parallel for
for(PS::S32 i=0; i<n_loc; i++){
PS::F64vec pos = pp[i].pos;
PS::F64 r2 = pos.x*pos.x + pos.y*pos.y;
PS::F64 v_dispi = 0.;
PS::F64 ni = 0.;
for(PS::S32 j=0; j<N; j++){
PS::F64 ddr = r_min + (j+0.5)*dr - sqrt(r2);
PS::F64 expr = exp(-ddr*ddr * drinv*drinv);
v_dispi += v_disp_glb[j] * expr;
ni += (v_disp_glb[j] > 0) ? expr : 0.;
}
pp[i].v_disp = v_dispi / ni;
assert ( pp[i].v_disp > 0. );
}
}
// Single global (unbinned) version of calcRandomVel: returns one velocity
// dispersion for the whole disk instead of writing per-particle values.
template <class Tpsys>
PS::F64 calcRandomVelAll(Tpsys & pp,
const PS::S32 n_tot,
const PS::S32 n_loc)
{
#ifdef ISOTROPIC
PS::F64vec v_ave_loc = 0.;
PS::F64vec v_ave_glb = 0.;
PS::F64 v_sq_loc = 0.;
PS::F64 v_sq_glb = 0.;
#else
PS::F64 v_disp_loc = 0.;
#endif
PS::F64 v_disp_glb = 0.;
PS::S32 n_ptcl_loc = 0;
PS::S32 n_ptcl_glb = 0;
#ifdef ISOTROPIC
#pragma omp parallel for reduction (+:v_ave_loc, v_sq_loc, n_ptcl_loc)
for(PS::S32 i=0; i<n_loc; i++){
PS::F64vec vec = pp[i].vel;
v_ave_loc += vec;
v_sq_loc += vec*vec;
n_ptcl_loc ++;
}
v_ave_glb = PS::Comm::getSum(v_ave_loc);
v_sq_glb = PS::Comm::getSum(v_sq_loc);
n_ptcl_glb = PS::Comm::getSum(n_ptcl_loc);
assert ( n_ptcl_glb == n_tot );
// NOTE(review): here v_ave_glb is not divided by n before squaring,
// unlike the binned version in calcRandomVel — confirm intended.
v_disp_glb = sqrt(v_sq_glb / n_ptcl_glb - v_ave_glb*v_ave_glb);
#else //ISOTROPIC
#pragma omp parallel for reduction (+:v_disp_loc, n_ptcl_loc)
for(PS::S32 i=0; i<n_loc; i++){
#if 1
// residual after subtracting the local circular Kepler velocity
PS::F64vec pos = pp[i].pos;
PS::F64 r2 = pos.x*pos.x + pos.y*pos.y;
PS::F64 ri = sqrt(r2);
PS::F64vec v_kep;
v_kep.x=-pos.y/ri ; v_kep.y=pos.x/ri; v_kep.z=0.;
v_kep *= pp[i].getKeplerVelocity();
PS::F64vec v_ran = pp[i].vel - v_kep;
#else
// alternative estimate from orbital elements
PS::F64 ax;
PS::F64 ecc = pp[i].getEccentricity(ax);
PS::F64vec h;
PS::F64 inc = pp[i].getInclination(h);
PS::F64vec v_kep = pp[i].getKeplerVelocity();
PS::F64 v_ran = (ecc*ecc + inc*inc) * v_kep*v_kep;
#endif
v_disp_loc += v_ran * v_ran;
n_ptcl_loc ++;
}
v_disp_glb = PS::Comm::getSum(v_disp_loc);
n_ptcl_glb = PS::Comm::getSum(n_ptcl_loc);
assert ( n_ptcl_glb == n_tot );
v_disp_glb = sqrt(v_disp_glb / n_ptcl_glb); // rms random velocity
#endif //ISOTROPIC
return v_disp_glb;
}
#ifdef USE_INDIVIDUAL_CUTOFF
// Recompute per-particle cutoff and search radii: first refresh the local
// velocity dispersion (unless it is a compile-time constant), then set
// each particle's r_out / r_search.
template <class Tpsys>
void setCutoffRadii(Tpsys & pp)
{
const PS::S32 n_loc = pp.getNumberOfParticleLocal();
const PS::S32 n_tot = pp.getNumberOfParticleGlobal();
#ifndef CONSTANT_RANDOM_VELOCITY
calcRandomVel(pp, n_tot, n_loc);
#endif
#pragma omp parallel for
for(PS::S32 i=0; i<n_loc; i++){
pp[i].setROutRSearch();
//pp[i].setRPlanet();
}
}
#else //USE_INDIVIDUAL_CUTOFF
// Shared-cutoff variant: derive one global r_out / r_search pair from the
// global maximum (a-scaled) Hill radius and the global velocity dispersion.
template <class Tpsys>
void setCutoffRadii(Tpsys & pp)
{
const PS::S32 n_loc = pp.getNumberOfParticleLocal();
const PS::S32 n_tot = pp.getNumberOfParticleGlobal();
#ifndef CONSTANT_RANDOM_VELOCITY
PS::F64 v_disp_glb = calcRandomVelAll(pp, n_tot, n_loc);
#else
PS::F64 v_disp_glb = v_disp;
#endif
PS::F64 rHill_a_loc = 0.;
#pragma omp parallel for reduction (max: rHill_a_loc)
for(PS::S32 i=0; i<n_loc; i++){
#ifndef WITHOUT_SUN
PS::F64 ax = pp[i].getSemimajorAxis2();
PS::F64 rHill_a = pp[i].getRHill() * pow(ax,-FPGrav::p_cut);
rHill_a_loc = std::max(rHill_a, rHill_a_loc);
#else
rHill_a_loc = std::max(pp[i].mass, rHill_a_loc); // mass proxy without a Sun
#endif
//pp[i].setRPlanet();
}
PS::F64 rHill_a_glb = PS::Comm::getMaxValue(rHill_a_loc);
FPGrav::setROutRSearch(rHill_a_glb, v_disp_glb);
}
#endif //USE_INDIVIDUAL_CUTOFF
//////////////////////
/// Super Particle ///
//////////////////////
// Monopole moment extended with a mass-weighted Cartesian position so the
// tree can run in polar coordinates while forces use Cartesian centers.
class MyMomentMonopole : public PS::MomentMonopole {
public:
#ifdef USE_POLAR_COORDINATE
PS::F64vec pos_car; // mass-weighted Cartesian position (normalized in set())
MyMomentMonopole() {
pos_car = 0.;
}
MyMomentMonopole(const PS::F64 m, const PS::F64vec & p, const PS::F64vec & p_car) : PS::MomentMonopole(m, p) {
pos_car = p_car;
}
void init(){
PS::MomentMonopole::init();
pos_car = 0.;
}
template<class Tepj>
void accumulateAtLeaf(const Tepj & epj){
PS::MomentMonopole::accumulateAtLeaf(epj);
pos_car += epj.getCharge() * epj.getPosCar(); // mass-weighted sum
}
template<class Tepj>
void accumulateAtLeaf2(const Tepj & epj){} // no second pass for monopole
void accumulate(const MyMomentMonopole & mom){
PS::MomentMonopole::accumulate(mom);
pos_car += mom.mass * mom.pos_car;
}
void accumulate2(const MyMomentMonopole & mom){}
// for DEBUG
void dump(std::ostream & fout = std::cout) const {
fout<<"mass="<<mass<<std::endl;
fout<<"pos="<<pos<<std::endl;
fout<<"pos_car="<<pos_car<<std::endl;
}
#endif
// Normalize the mass-weighted sums into center-of-mass positions.
void set(){
pos = (mass != 0.) ? pos / mass : 0.;
#ifdef USE_POLAR_COORDINATE
pos_car = (mass != 0.) ? pos_car / mass : 0.;
#endif
}
};
// Super-particle counterpart of MyMomentMonopole: carries the Cartesian
// center alongside the (polar) tree position.
class MySPJMonopole : public PS::SPJMonopole {
public:
#ifdef USE_POLAR_COORDINATE
PS::F64vec pos_car; // Cartesian center of mass
template<class Tmom>
void copyFromMoment(const Tmom & mom){
PS::SPJMonopole::copyFromMoment(mom);
this->pos_car = mom.pos_car;
}
void clear(){
PS::SPJMonopole::clear();
pos_car = 0.0;
}
PS::F64vec getPosCar() const { return pos_car; }
MyMomentMonopole convertToMoment() const {
return MyMomentMonopole(mass, pos, pos_car);
}
#endif
};
// Quadrupole moment extended with a mass-weighted Cartesian position; the
// second accumulation pass builds the quadrupole tensor about pos_car.
class MyMomentQuadrupole : public PS::MomentQuadrupole {
public:
#ifdef USE_POLAR_COORDINATE
PS::F64vec pos_car; // mass-weighted Cartesian position (normalized in set())
MyMomentQuadrupole(){
pos_car = 0.;
}
MyMomentQuadrupole(const PS::F64 m, const PS::F64vec & p, const PS::F64mat & q, const PS::F64vec & p_car) : PS::MomentQuadrupole(m, p, q) {
pos_car = p_car;
}
void init(){
PS::MomentQuadrupole::init();
pos_car = 0.;
}
template<class Tepj>
void accumulateAtLeaf(const Tepj & epj){
PS::MomentQuadrupole::accumulateAtLeaf(epj);
pos_car += epj.getCharge() * epj.getPosCar();
}
// Second pass: accumulate the quadrupole tensor about the (already
// normalized) Cartesian center.
template<class Tepj>
void accumulateAtLeaf2(const Tepj & epj){
PS::F64 ctmp = epj.getCharge();
PS::F64vec ptmp = epj.getPosCar() - this->pos_car;
PS::F64 cx = ctmp * ptmp.x;
PS::F64 cy = ctmp * ptmp.y;
PS::F64 cz = ctmp * ptmp.z;
this->quad.xx += cx * ptmp.x;
this->quad.yy += cy * ptmp.y;
this->quad.zz += cz * ptmp.z;
this->quad.xy += cx * ptmp.y;
this->quad.xz += cx * ptmp.z;
this->quad.yz += cy * ptmp.z;
}
void accumulate(const MyMomentQuadrupole & mom){
PS::MomentQuadrupole::accumulate(mom);
pos_car += mom.mass * mom.pos_car;
}
// Combine a child's quadrupole: parallel-axis shift plus its own tensor.
void accumulate2(const MyMomentQuadrupole & mom){
PS::F64 mtmp = mom.mass;
PS::F64vec ptmp = mom.pos_car - this->pos_car;
PS::F64 cx = mtmp * ptmp.x;
PS::F64 cy = mtmp * ptmp.y;
PS::F64 cz = mtmp * ptmp.z;
this->quad.xx += cx * ptmp.x + mom.quad.xx;
this->quad.yy += cy * ptmp.y + mom.quad.yy;
this->quad.zz += cz * ptmp.z + mom.quad.zz;
this->quad.xy += cx * ptmp.y + mom.quad.xy;
this->quad.xz += cx * ptmp.z + mom.quad.xz;
this->quad.yz += cy * ptmp.z + mom.quad.yz;
}
void dump(std::ostream & fout = std::cout) const {
fout<<"mass= "<<mass<<std::endl;
fout<<"pos= "<<pos<<std::endl;
fout<<"pos_car="<<pos_car<<std::endl;
fout<<"quad= "<<quad<<std::endl;
}
#endif
// Normalize the mass-weighted sums into center-of-mass positions.
void set(){
pos = (mass != 0.) ? pos / mass : 0.;
#ifdef USE_POLAR_COORDINATE
pos_car = (mass != 0.) ? pos_car / mass : 0.;
#endif
}
};
// Super-particle counterpart of MyMomentQuadrupole.
class MySPJQuadrupole : public PS::SPJQuadrupole {
public:
#ifdef USE_POLAR_COORDINATE
PS::F64vec pos_car; // Cartesian center of mass
PS::F64 getCharge() const { return mass; }
PS::F64vec getPos() const { return pos; }
PS::F64vec getPosCar() const { return pos_car; }
void copyFromMoment(const MyMomentQuadrupole & mom){
PS::SPJQuadrupole::copyFromMoment(mom);
pos_car = mom.pos_car;
}
MyMomentQuadrupole convertToMoment() const {
return MyMomentQuadrupole(mass, pos, quad, pos_car);
}
void clear(){
PS::SPJQuadrupole::clear();
pos_car = 0.0;
}
#endif
};
|
krb5pa-sha1_fmt_plug.c | /*
* Kerberos 5 "PA ENC TIMESTAMP" by magnum (modified by Dhiru)
*
* Pcap file -> input file:
* 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml
* 2. krbng2john.py ~/capture.pdml > krb5.in
* 3. Run john on krb5.in
*
* http://www.ietf.org/rfc/rfc4757.txt
* http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html
*
* Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum'
*
* NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ
* packet.
*
* Default Salt: realm + user
*
* AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5
* See the following RFC for more details about the crypto & algorithms used:
*
* RFC3961 - Encryption and Checksum Specifications for Kerberos 5
* RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5
*
* march 09 / kevin devine <wyse101 0x40 gmail.com>
*
* This software is Copyright (c) 2011 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and
* released under same terms as above
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5pa;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5pa);
#else
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "common.h"
#include "unicode.h"
#include "johnswap.h"
#include "aes/aes.h"
#include "gladman_fileenc.h"
#include "pbkdf2_hmac_sha1.h"
#include "loader.h"
#include "memdbg.h"
#define FORMAT_LABEL "krb5pa-sha1"
#define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */
#ifdef MMX_COEF
#define ALGORITHM_NAME SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 12
#define BINARY_ALIGN 4
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define MAX_SALTLEN 128
#define MAX_REALMLEN 64
#define MAX_USERLEN 64
#define TIMESTAMP_SIZE 44
#define CHECKSUM_SIZE BINARY_SIZE
#define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN)
#define HEXCHARS "0123456789abcdefABCDEF"
/* Self-test vectors: etype 18 (AES-256) and, last, an etype 17 (AES-128)
 * hash obtained via an MiTM etype-downgrade; one entry carries an explicit
 * salt field, the rest use the default realm+user salt. */
static struct fmt_tests tests[] = {
{"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"},
{"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"},
{"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
/* etype 17 hash obtained using MiTM etype downgrade attack */
{"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"},
{NULL},
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1]; /* candidate passwords */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; /* computed 12-byte checksums */
/* Parsed salt; compared and hashed as raw memory by the format framework. */
static struct custom_salt {
int etype;
unsigned char realm[64];
unsigned char user[64];
unsigned char salt[128]; /* realm + user */
unsigned char ct[44]; /* encrypted timestamp + checksum */
} *cur_salt;
static unsigned char constant[16]; /* n-fold("kerberos", 128), for key derivation */
static unsigned char ke_input[16]; /* n-fold(usage | 0xAA): Ke derivation input */
static unsigned char ki_input[16]; /* n-fold(usage | 0x55): Ki derivation input */
/* n-fold(k-bits):
* l = lcm(n,k)
* r = l/k
* s = k-bits | k-bits rot 13 | k-bits rot 13*2 | ... | k-bits rot 13*(r-1)
* compute the 1's complement sum:
* n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1] */
/* representation: msb first, assume n and k are multiples of 8, and
* that k>=16. this is the case of all the cryptosystems which are
* likely to be used. this function can be replaced if that
* assumption ever fails. */
/* input length is in bits */
static void nfold(unsigned int inbits, const unsigned char *in,
unsigned int outbits,unsigned char *out)
{
int a,b,c,lcm;
int byte, i, msbit;
/* the code below is more readable if I make these bytes
* instead of bits */
inbits >>= 3;
outbits >>= 3;
/* first compute lcm(n,k) */
a = outbits;
b = inbits;
while (b != 0) {
c = b;
b = a % b;
a = c;
}
lcm = outbits*inbits/a;
/* now do the real work */
memset(out, 0, outbits);
byte = 0;
/* this will end up cycling through k lcm(k,n)/k times, which
* is correct */
for (i = lcm - 1; i >= 0; i--) {
/* compute the msbit in k which gets added into this byte */
msbit = (/* first, start with the msbit in the first, unrotated byte */
((inbits << 3) - 1)
/* then, for each byte, shift to the right for each
* repetition */
+(((inbits << 3) + 13) * (i / inbits))
/* last, pick out the correct byte within that
* shifted repetition */
+((inbits - (i % inbits)) << 3)
) % (inbits << 3);
/* pull out the byte value itself */
byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
(in[((inbits) - (msbit>>3)) % inbits]))
>>((msbit & 7) + 1)) & 0xff;
/* do the addition */
byte += out[i % outbits];
out[i % outbits] = byte & 0xff;
/* keep around the carry bit, if any */
byte >>= 8;
}
/* if there's a carry bit left over, add it back in */
if (byte) {
for (i = outbits - 1; i >= 0; i--) {
/* do the addition */
byte += out[i];
out[i] = byte & 0xff;
/* keep around the carry bit, if any */
byte >>= 8;\
}
}
}
/* One-time format setup: scale the per-candidate buffers for OpenMP and
 * precompute the RFC 3961 n-fold constants used by the key derivation. */
static void init(struct fmt_main *self)
{
unsigned char usage[5];
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
// generate 128 bits from the 64-bit string "kerberos"
nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);
memset(usage,0,sizeof(usage));
usage[3] = 0x01; // key number in big-endian format
usage[4] = 0xAA; // used to derive Ke
nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);
memset(usage,0,sizeof(usage));
usage[3] = 0x01; // key number in big-endian format
usage[4] = 0x55; // used to derive Ki
nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
}
/* Validate a ciphertext of the form
 *   $krb5pa$<etype>$<user>$<realm>$<salt>$<hex timestamp||checksum>
 * Returns 1 when well-formed and supported (etype 17/18), 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
    char *p, *data = ciphertext;
    int type, saltlen = 0;

    // tag is mandatory
    if (strncmp(ciphertext, "$krb5pa$", 8) != 0)
        return 0;
    data += 8;
    // etype field, 17 or 18
    p = strchr(data, '$');
    if (!p || p - data != 2)
        return 0;
    type = atoi(data);
    if (type < 17 || type > 18)
        return 0;
    data = p + 1;
    // user field
    p = strchr(data, '$');
    if (!p || p - data > MAX_USERLEN)
        return 0;
    saltlen += p - data;
    data = p + 1;
    // realm field
    p = strchr(data, '$');
    if (!p || p - data > MAX_REALMLEN)
        return 0;
    saltlen += p - data;
    data = p + 1;
    // salt field
    p = strchr(data, '$');
    if (!p)
        return 0;
    // if salt is empty, realm.user is used instead
    if (p - data)
        saltlen = p - data;
    data = p + 1;
    // We support a max. total salt length of 52.
    // We could opt to emit a warning if rejected here.
    if(saltlen > MAX_SALTLEN) {
        static int warned = 0; /* warn at most once per run */
        if (!ldr_in_pot)
            if (!warned++)
                fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);
        return 0;
    }
    // 56 bytes (112 hex chars) encrypted timestamp + checksum
    if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) ||
        strspn(data, HEXCHARS) != strlen(data))
        return 0;
    return 1;
}
/* Parse a valid() ciphertext into the static custom_salt structure.
 * Empty user/realm/salt fields are detected via the p[-1] == '$' test:
 * strtok() skips runs of delimiters, so when the returned token is still
 * preceded by an unconsumed '$' the expected field was empty and the token
 * actually belongs to the next field.  When the salt field is empty,
 * realm concatenated with user is used as the salt instead. */
static void *get_salt(char *ciphertext)
{
    char *ctcopy = strdup(ciphertext);
    char *keeptr = ctcopy;
    char *p;
    int i;
    static struct custom_salt cs; /* returned by address; zeroed at load */

    ctcopy += 8; /* skip over "$krb5pa$" tag */
    p = strtok(ctcopy, "$");
    cs.etype = atoi(p);
    p = strtok(NULL, "$");
    if (p[-1] == '$')
        cs.user[0] = 0;
    else {
        strcpy((char*)cs.user, p);
        p = strtok(NULL, "$");
    }
    if (p[-1] == '$')
        cs.realm[0] = 0;
    else {
        strcpy((char*)cs.realm, p);
        p = strtok(NULL, "$");
    }
    if (p[-1] == '$') {
        /* empty salt: use realm.user (lengths bounded by valid()) */
        strcpy((char*)cs.salt, (char*)cs.realm);
        strcat((char*)cs.salt, (char*)cs.user);
    } else {
        strcpy((char*)cs.salt, p);
        p = strtok(NULL, "$");
    }
    /* decode the hex-encoded encrypted timestamp */
    for (i = 0; i < TIMESTAMP_SIZE; i++)
        cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
            + atoi16[ARCH_INDEX(p[i * 2 + 1])];
    MEM_FREE(keeptr);
    return (void *)&cs;
}
/* Store a candidate password for the given index, truncated to
 * PLAINTEXT_LENGTH bytes and always NUL-terminated. */
static void set_key(char *key, int index)
{
    size_t n = strlen(key);

    n = (n > PLAINTEXT_LENGTH) ? (size_t)PLAINTEXT_LENGTH : n;
    memcpy(saved_key[index], key, n);
    saved_key[index][n] = 0;
}
/* Canonicalize a ciphertext so equivalent hashes unify: rebuild the tag
 * line, substitute the default salt when the salt field is empty, and
 * lower-case the trailing hex blob (FMT_SPLIT_UNIFIES_CASE). */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
    static char out[TOTAL_LENGTH + 1];
    char in[TOTAL_LENGTH + 1];
    char salt[MAX_SALTLEN + 1];
    char *data;
    char *e, *u, *r, *s, *tc;

    strnzcpy(in, ciphertext, sizeof(in));
    /* split the fields in-place, scanning right to left:
     * tc = timestamp+checksum, s = salt, r = realm, u = user, e = etype */
    tc = strrchr(in, '$'); *tc++ = 0;
    s = strrchr(in, '$'); *s++ = 0;
    r = strrchr(in, '$'); *r++ = 0;
    u = strrchr(in, '$'); *u++ = 0;
    e = in + 8;
    /* Default salt is realm then user, matching get_salt() */
    if (!*s) {
        snprintf(salt, sizeof(salt), "%s%s", r, u);
        s = salt;
    }
    snprintf(out, sizeof(out), "$krb5pa$%s$%s$%s$%s$%s", e, u, r, s, tc);
    /* lower-case the hex timestamp+checksum (data points at the '$' just
     * before it, which strlwr leaves unchanged) */
    data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
    strlwr(data);
    return out;
}
/* Decode the checksum portion (the BINARY_SIZE*2 hex chars following the
 * encrypted timestamp) into a static, word-aligned buffer. */
static void *get_binary(char *ciphertext)
{
    static union {
        unsigned char c[BINARY_SIZE];
        ARCH_WORD dummy; /* forces word alignment of c[] */
    } buf;
    unsigned char *out = buf.c;
    char *p;
    int i;

    p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */
    for (i = 0; i < BINARY_SIZE; i++) {
        out[i] =
            (atoi16[ARCH_INDEX(*p)] << 4) |
            atoi16[ARCH_INDEX(p[1])];
        p += 2;
    }
    return out;
}
/* Return the candidate password stored at this index. */
static char *get_key(int index)
{
    return saved_key[index];
}

/* Hash-table lookup helpers: successively wider low-bit slices of the
 * first 32 bits of the computed checksum. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

/* Remember the current salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
    cur_salt = (struct custom_salt *)salt;
}
/* AES-CBC with ciphertext stealing (CTS), as used by the Kerberos aes-cts
 * encryption types: the final two blocks are swapped/truncated so output
 * length equals input length with no padding.  'encryptp' selects the
 * direction.  NOTE(review): assumes len >= AES_BLOCK_SIZE (the decrypt
 * path subtracts a block unconditionally); the caller here passes 44. */
static void
AES_cts_encrypt(const unsigned char *in, unsigned char *out,
        size_t len, const AES_KEY *key,
        unsigned char *ivec, const int encryptp)
{
    unsigned char tmp[AES_BLOCK_SIZE];
    unsigned int i;

    if (encryptp) {
        /* standard CBC over every whole block except the last */
        while(len > AES_BLOCK_SIZE) {
            for (i = 0; i < AES_BLOCK_SIZE; i++)
                tmp[i] = in[i] ^ ivec[i];
            AES_encrypt(tmp, out, key);
            memcpy(ivec, out, AES_BLOCK_SIZE);
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }
        /* final (possibly partial) block, zero-padded before the XOR */
        for (i = 0; i < len; i++)
            tmp[i] = in[i] ^ ivec[i];
        for (; i < AES_BLOCK_SIZE; i++)
            tmp[i] = 0 ^ ivec[i];
        /* steal: the full final cipher block overwrites the previous slot,
         * and the truncated previous cipher block becomes the last output */
        AES_encrypt(tmp, out - AES_BLOCK_SIZE, key);
        memcpy(out, ivec, len);
        memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
    } else {
        unsigned char tmp2[AES_BLOCK_SIZE];
        unsigned char tmp3[AES_BLOCK_SIZE];

        /* standard CBC decrypt until only the last two blocks remain */
        while(len > AES_BLOCK_SIZE * 2) {
            memcpy(tmp, in, AES_BLOCK_SIZE);
            AES_decrypt(in, out, key);
            for (i = 0; i < AES_BLOCK_SIZE; i++)
                out[i] ^= ivec[i];
            memcpy(ivec, tmp, AES_BLOCK_SIZE);
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }
        /* undo the stealing: reconstruct the swapped/truncated final pair */
        len -= AES_BLOCK_SIZE;
        memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */
        AES_decrypt(in, tmp2, key);
        memcpy(tmp3, in + AES_BLOCK_SIZE, len);
        memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */
        for (i = 0; i < len; i++)
            out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i];
        AES_decrypt(tmp3, out, key);
        for (i = 0; i < AES_BLOCK_SIZE; i++)
            out[i] ^= ivec[i];
        memcpy(ivec, tmp, AES_BLOCK_SIZE);
    }
}
// keysize = 32 for 256 bits, 16 for 128 bits
/* DK key derivation: produce key_size output bytes from key_in and a
 * 16-byte (n-folded) constant in ptext.  CBC with a zero IV over the
 * buffer [ptext(16) | zeros(16)] yields E(c) followed by E(E(c)), which
 * matches the RFC 3962 DR construction for both 128- and 256-bit keys.
 * NOTE(review): only the first 16 bytes of ptext are consumed and
 * ptext_size is ignored — confirm callers always pass a 16-byte-usable
 * constant (they pass the 32-byte nfold outputs here). */
static void dk(unsigned char key_out[], unsigned char key_in[],
        size_t key_size, unsigned char ptext[], size_t ptext_size)
{
    unsigned char iv[32];
    unsigned char plaintext[32];
    AES_KEY ekey;

    memset(iv,0,sizeof(iv));
    memset(plaintext,0,sizeof(plaintext));
    memcpy(plaintext,ptext,16);
    AES_set_encrypt_key(key_in,key_size*8,&ekey);
    AES_cbc_encrypt(plaintext,key_out,key_size,&ekey,iv,AES_ENCRYPT);
}
/* Decrypt ctext_size bytes of Kerberos aes-cts ciphertext with the given
 * key, using a fresh all-zero IV for every call. */
static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size,
        unsigned char plaintext[], const unsigned char key[], size_t key_size)
{
    unsigned char iv[32];
    AES_KEY ekey;

    memset(iv,0,sizeof(iv));
    AES_set_decrypt_key(key,key_size*8,&ekey);
    AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_DECRYPT);
}
#if 0 /* This is not used */
/* Encrypt counterpart of krb_decrypt(); kept for reference, compiled out. */
static void krb_encrypt(const unsigned char ciphertext[], size_t ctext_size,
        unsigned char plaintext[], const unsigned char key[], size_t key_size)
{
    unsigned char iv[32];
    AES_KEY ekey;

    memset(iv,0,sizeof(iv));
    AES_set_encrypt_key(key,key_size*8,&ekey);
    AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_ENCRYPT);
}
#endif
/* Compute checksums for all queued candidates.
 *
 * For each candidate: PBKDF2-SHA1(password, salt, 4096) -> tkey, then the
 * DK chain derives base_key and Ke; Ke decrypts the AS-REQ timestamp and,
 * when the known-plaintext markers match, Ki is derived and an HMAC-SHA-1
 * checksum over the plaintext is stored for binary comparison.
 *
 * Bug fix: the "for" statement was previously inside the #ifdef _OPENMP
 * block, so non-OpenMP builds executed the body exactly once and only the
 * first MAX_KEYS_PER_CRYPT candidates were ever hashed.  The loop must
 * exist in both builds; only the pragma is conditional. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
    {
        unsigned char tkey[MAX_KEYS_PER_CRYPT][32];
        unsigned char base_key[32];
        unsigned char Ke[32];
        unsigned char plaintext[44];
        int key_size, i;
        int len[MAX_KEYS_PER_CRYPT];
#ifdef MMX_COEF
        unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
        for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
            len[i] = strlen(saved_key[i+index]);
            pin[i] = (unsigned char*)saved_key[i+index];
            pout[i] = tkey[i];
        }
        pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt,strlen((char*)cur_salt->salt), 4096, pout, 32, 0);
#else
        /* scalar path computes tkey[0] only; assumes MAX_KEYS_PER_CRYPT
         * is 1 in non-SIMD builds -- TODO confirm */
        for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
            len[i] = strlen(saved_key[index+i]);
        }
        pbkdf2_sha1((const unsigned char*)saved_key[index], len[0],
            cur_salt->salt,strlen((char*)cur_salt->salt),
            4096, tkey[0], 32, 0);
#if !ARCH_LITTLE_ENDIAN
        {
            int j;
            for (j = 0; j < 32/sizeof(ARCH_WORD_32); ++j) {
                ((ARCH_WORD_32*)tkey[0])[j] = JOHNSWAP(((ARCH_WORD_32*)tkey[0])[j]);
            }
        }
#endif
#endif
        for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
            // etype 17 = aes128-cts-hmac-sha1-96, 18 = aes256-cts-hmac-sha1-96
            if (cur_salt->etype == 17)
                key_size = 16;
            else
                key_size = 32;
            /* base-key = DK(tkey, nfold("kerberos")); the constant was
             * precomputed in init() */
            dk(base_key, tkey[i], key_size, constant, 32);
            /* The "well-known constant" used for the DK function is the key
             * usage number, expressed as four octets in big-endian order,
             * followed by one octet indicated below.
             * Kc = DK(base-key, usage | 0x99);
             * Ke = DK(base-key, usage | 0xAA);
             * Ki = DK(base-key, usage | 0x55);
             * ke_input/ki_input are precomputed in init(). */
            dk(Ke, base_key, key_size, ke_input, 32);
            // decrypt the AS-REQ timestamp; the checksum is verified below
            krb_decrypt(cur_salt->ct,44,plaintext,Ke, key_size);
            // Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and
            // bail out if we are out of luck.
            if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') {
                unsigned char Ki[32];
                unsigned char checksum[20];

                // derive Ki used in the HMAC-SHA-1 checksum
                dk(Ki,base_key, key_size, ki_input, 32);
                // derive checksum of plaintext
                hmac_sha1(Ki, key_size, plaintext, 44, checksum, 20);
                memcpy(crypt_out[index+i], checksum, BINARY_SIZE);
            } else {
                memset(crypt_out[index+i], 0, BINARY_SIZE);
            }
        }
    }
    return count;
}
/* Does any candidate in [0, count) match the reference checksum? */
static int cmp_all(void *binary, int count)
{
    int index = 0;

    for (; index < count; index++)
        if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
            return 1;
    return 0;
}

/* Exact BINARY_SIZE comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
    return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* All BINARY_SIZE bytes were already compared in cmp_one(); nothing
 * further to verify. */
static int cmp_exact(char *source, int index)
{
    return 1;
}
/* Format descriptor registered with the cracker core: the parameter block
 * first, then the method table wiring up the functions defined above. */
struct fmt_main fmt_krb5pa = {
    { /* params */
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        tests
    }, { /* methods */
        init,
        fmt_default_done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        split,
        get_binary,
        get_salt,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
|
GB_binop__isne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int8)
// A*D function (colscale): GB (_AxD__isne_int8)
// D*A function (rowscale): GB (_DxB__isne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int8)
// C=scalar+B GB (_bind1st__isne_int8)
// C=scalar+B' GB (_bind1st_tran__isne_int8)
// C=A+scalar GB (_bind2nd__isne_int8)
// C=A'+scalar GB (_bind2nd_tran__isne_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT8 || GxB_NO_ISNE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
/* Disabled stub: the ISNE operator is not one of the accumulators the
 * dense ewise3-accum kernel supports, so no function is generated. */
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where C, A and B are all dense; the loop body comes from the
 * included template, specialized by the GB_* macros defined above. */
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate a sparse matrix B into an all-dense C. */
GrB_Info GB (_Cdense_accumB__isne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar b into a dense matrix.  (The second
 * "return (GrB_SUCCESS)" is unreachable; harmless generated code.) */
GrB_Info GB (_Cdense_accumb__isne_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale the columns of A by the diagonal matrix D. */
GrB_Info GB (_AxD__isne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: scale the rows of B by the diagonal matrix D. */
GrB_Info GB (_DxB__isne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns. */
GrB_Info GB (_AaddB__isne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
/* eWiseMult: C = A.*B or C<M> = A.*B over the pattern intersection. */
GrB_Info GB (_AemultB_01__isne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
 * GB_BINOP_FLIP is 0 for ISNE (commutative), so only the unflipped
 * template is compiled. */
GrB_Info GB (_AemultB_02__isne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult: C<M> = A.*B with M sparse/hyper and A, B bitmap/full. */
GrB_Info GB (_AemultB_03__isne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap. */
GrB_Info GB (_AemultB_bitmap__isne_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx [p] = (x != Bx [p]) for every entry present per the Bb bitmap:
 * apply the binary op with the first operand bound to a scalar. */
GrB_Info GB (_bind1st__isne_int8)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent per the Bb bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx [p] = (Ax [p] != y) for every entry present per the Ab bitmap:
 * apply the binary op with the second operand bound to a scalar. */
GrB_Info GB (_bind2nd__isne_int8)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent per the Ab bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
/* C = op (x, A'): transpose A while applying the op with the first
 * operand bound to scalar x.  GB_ATYPE is redefined around the template
 * because A is the second input of z=f(x,y) here. */
GrB_Info GB (_bind1st_tran__isne_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
/* C = op (A', y): transpose A while applying the op with the second
 * operand bound to scalar y (uses the GB_CAST_OP defined just above). */
GrB_Info GB (_bind2nd_tran__isne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Example_udr.3.c | /*
* @@name: udr.3.c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
#define N 100
struct mx_s {
float value;
int index;
};
/* prototype functions for combiner and initializer in
the declare reduction */
void mx_combine(struct mx_s *out, struct mx_s *in);
void mx_init(struct mx_s *priv, struct mx_s *orig);
#pragma omp declare reduction(maxloc: struct mx_s: \
mx_combine(&omp_out, &omp_in)) \
initializer(mx_init(&omp_priv, &omp_orig))
/* Combiner for the 'maxloc' user-defined reduction: merge *in into *out,
 * keeping the larger value together with the index where it occurred. */
void mx_combine(struct mx_s *out, struct mx_s *in)
{
    /* guard form of the original comparison; negation keeps the exact
     * semantics of (out->value < in->value) for all float inputs */
    if (!(out->value < in->value))
        return;
    out->value = in->value;
    out->index = in->index;
}
/* Initializer for the 'maxloc' reduction: each thread's private copy
 * starts as a full copy of the original variable's state. */
void mx_init(struct mx_s *priv, struct mx_s *orig)
{
    *priv = *orig;  /* struct assignment copies both value and index */
}
/* Demonstrates the user-defined 'maxloc' reduction declared above:
 * find the maximum of val[] and the index where it occurs, in parallel. */
int main(void)
{
    struct mx_s mx;
    float val[N], d;
    int i, count = N;

    /* val[] peaks where d == 0, i.e. at i == N*0.8 (index 80 for N=100) */
    for (i = 0; i < count; i++) {
        d = (N*0.8f - i);
        val[i] = N * N - d * d;
    }
    /* seed the reduction with element 0; the loop scans the rest */
    mx.value = val[0];
    mx.index = 0;
    #pragma omp parallel for reduction(maxloc: mx)
    for (i = 1; i < count; i++) {
        if (mx.value < val[i])
        {
            mx.value = val[i];
            mx.index = i;
        }
    }
    printf("max value = %g, index = %d\n", mx.value, mx.index);
    /* prints 10000, 80 */
    return 0;
}
|
GB_unaryop__abs_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_fp64
// op(A') function: GB_tran__abs_fp64_fp64
// C type: double
// A type: double
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = fabs ((double) Ax [p]) for all anz entries, parallelized with
 * nthreads OpenMP threads (via the GB_CAST_OP macro defined above). */
GrB_Info GB_unop__abs_fp64_fp64
(
    double *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = fabs (A'): transpose, typecast, and apply the unary op via the
 * included template (numerical phase 2 of 2). */
GrB_Info GB_tran__abs_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ccavl_impl.h | //Copyright (c) 2010 Philip W. Howard
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files (the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions:
//
//The above copyright notice and this permission notice shall be included in
//all copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
//THE SOFTWARE.
/*
* File: ccavl.h
* Author: Trevor Brown
*
* Substantial improvements to interface, memory reclamation and bug fixing.
*
* Created on June 7, 2017, 9:52 AM
*/
#ifndef CCAVL_H
#define CCAVL_H
#include "record_manager.h"
//#if (INDEX_STRUCT == IDX_CCAVL_SPIN)
//#define SPIN_LOCK
//#elif (INDEX_STRUCT == IDX_CCAVL_BASELINE)
//#define BASELINE
////uses pthread mutex
//#else
//#error
//#endif
//#ifdef SPIN_LOCK
typedef pthread_spinlock_t ptlock_t;
#define lock_size sizeof(ptlock_t)
#define mutex_init(lock) pthread_spin_init(lock, PTHREAD_PROCESS_PRIVATE)
#define mutex_destroy(lock) pthread_spin_destroy(lock)
#define mutex_lock(lock) pthread_spin_lock(lock)
#define mutex_unlock(lock) pthread_spin_unlock(lock)
//#else
//typedef pthread_mutex_t ptlock_t;
//#define lock_size sizeof(ptlock_t)
//#define mutex_init(lock) pthread_mutex_init(lock, NULL)
//#define mutex_destroy(lock) pthread_mutex_destroy(lock)
//#define mutex_lock(lock) pthread_mutex_lock(lock)
//#define mutex_unlock(lock) pthread_mutex_unlock(lock)
//#endif
#define lock_mb() asm volatile("":::"memory")
#define IMPLEMENTED 1
#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define ABS(a) ( (a) > 0 ? (a) : -(a) )
typedef unsigned long long version_t;
#ifndef PAD_SIZE
#define PAD_SIZE 128
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/* AVL tree node.  The default (non-BASELINE) layout carries the fields
 * used by the optimistic-concurrency AVL: a changeOVL version word for
 * optimistic reads (see the OVL* masks above), an embedded per-node
 * spinlock, and a cached height.  The BASELINE layout mirrors the
 * original benchmark node. */
template <typename skey_t, typename sval_t>
struct node_t {
#ifndef BASELINE
    skey_t key;
    struct node_t<skey_t, sval_t> * volatile left;
    struct node_t<skey_t, sval_t> * volatile right;
    volatile version_t changeOVL;   // version word read/validated by optimistic searches
    struct node_t * volatile parent;
    sval_t value;
    ptlock_t lock; //note: used to be a pointer to a lock!
    volatile int height;
#ifdef PAD_NODES
    char pad[PAD_SIZE];             // optional padding, presumably to limit false sharing -- TODO confirm
#endif
#else
    skey_t key;
    sval_t value;
    struct node_t<skey_t, sval_t> * volatile left;
    struct node_t<skey_t, sval_t> * volatile right;
    struct node_t<skey_t, sval_t> * volatile parent;
    unsigned long index;
    long color;
    ptlock_t lock;
    volatile int height;
    volatile version_t changeOVL;
#endif
};
/** This is a special value that indicates the presence of a null value,
* to differentiate from the absence of a value.
*/
static void * t_SpecialNull; // reserve an address
static void * SpecialNull = (void *) &t_SpecialNull; // this hack implies sval_t must be a pointer!
/** This is a special value that indicates that an optimistic read
* failed.
*/
static void * t_SpecialRetry;
static void * SpecialRetry = (void *) &t_SpecialRetry; // this hack implies sval_t must be a pointer!
/** The number of spins before yielding. */
#define SPIN_COUNT 100
/** The number of yields before blocking. */
#define YIELD_COUNT 0
// we encode directions as characters
#define LEFT 'L'
#define RIGHT 'R'
// return type for extreme searches
#define ReturnKey 0
#define ReturnEntry 1
#define ReturnNode 2
#define OVL_BITS_BEFORE_OVERFLOW 8
#define UnlinkedOVL (1LL)
#define OVLGrowLockMask (2LL)
#define OVLShrinkLockMask (4LL)
#define OVLGrowCountShift (3)
#define OVLShrinkCountShift (OVLGrowCountShift + OVL_BITS_BEFORE_OVERFLOW)
#define OVLGrowCountMask (((1L << OVL_BITS_BEFORE_OVERFLOW ) - 1) << OVLGrowCountShift)
#define UpdateAlways 0
#define UpdateIfAbsent 1
#define UpdateIfPresent 2
#define UpdateIfEq 3
#define UnlinkRequired -1
#define RebalanceRequired -2
#define NothingRequired -3
/**
 * Concurrent relaxed-balance AVL tree (Bronson et al.-style optimistic
 * concurrency): readers validate per-node version numbers (changeOVL)
 * instead of locking; writers take per-node mutexes and bump versions
 * around rotations.  Memory reclamation is delegated to RecMgr
 * (allocate / retire / epoch guards).  `root` is a sentinel holder node
 * carrying KEY_NEG_INFTY; the real tree hangs off root->right.
 */
template <typename skey_t, typename sval_t, class RecMgr>
class ccavl {
private:
    PAD;
    RecMgr * const recmgr;              // record manager for safe memory reclamation
    // PAD;
    node_t<skey_t, sval_t> * root;      // sentinel holder; real root is root->right
    // PAD;
    int init[MAX_THREADS_POW2] = {0,};  // which thread ids have initialized recmgr
    // PAD;
    // --- internal operations; definitions follow the class body ---
    node_t<skey_t, sval_t> * rb_alloc(const int tid);
    node_t<skey_t, sval_t>* rbnode_create(const int tid, skey_t key, sval_t value, node_t<skey_t, sval_t>* parent);
    sval_t get(const int tid, node_t<skey_t, sval_t>* tree, skey_t key);
    sval_t put(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, sval_t value);
    sval_t putIfAbsent(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, sval_t value);
    sval_t attemptNodeUpdate(
            const int tid,
            int func,
            sval_t expected,
            sval_t newValue,
            node_t<skey_t, sval_t>* parent,
            node_t<skey_t, sval_t>* curr);
    int attemptUnlink_nl(const int tid, node_t<skey_t, sval_t>* parent, node_t<skey_t, sval_t>* curr);
    sval_t remove_node(const int tid, node_t<skey_t, sval_t>* tree, skey_t key);
    int attemptInsertIntoEmpty(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, sval_t vOpt);
    sval_t attemptUpdate(
            const int tid,
            skey_t key,
            int func,
            sval_t expected,
            sval_t newValue,
            node_t<skey_t, sval_t>* parent,
            node_t<skey_t, sval_t>* curr,
            version_t nodeOVL);
    sval_t update(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, int func, sval_t expected, sval_t newValue);
    node_t<skey_t, sval_t>* rebalance_nl(const int tid, node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n);
    void fixHeightAndRebalance(const int tid, node_t<skey_t, sval_t>* curr);
    node_t<skey_t, sval_t>* get_child(node_t<skey_t, sval_t>* curr, char dir);
    void setChild(node_t<skey_t, sval_t>* curr, char dir, node_t<skey_t, sval_t>* new_node);
    void waitUntilChangeCompleted(node_t<skey_t, sval_t>* curr, version_t ovl);
    int height(volatile node_t<skey_t, sval_t>* curr);
    sval_t decodeNull(sval_t v);
    sval_t encodeNull(sval_t v);
    sval_t getImpl(node_t<skey_t, sval_t>* tree, skey_t key);
    sval_t attemptGet(skey_t key,
            node_t<skey_t, sval_t>* curr,
            char dirToC,
            version_t nodeOVL);
    int shouldUpdate(int func, sval_t prev, sval_t expected);
    int nodeCondition(node_t<skey_t, sval_t>* curr);
    node_t<skey_t, sval_t>* fixHeight_nl(node_t<skey_t, sval_t>* curr);
    node_t<skey_t, sval_t>* rebalanceToRight_nl(node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n, node_t<skey_t, sval_t>* nL, int hR0);
    node_t<skey_t, sval_t>* rebalanceToLeft_nl(node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n, node_t<skey_t, sval_t>* nL, int hR0);
    node_t<skey_t, sval_t>* rotateRight_nl(node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n, node_t<skey_t, sval_t>* nL, node_t<skey_t, sval_t>* nLR, int hR, int hLL, int hLR);
    node_t<skey_t, sval_t>* rotateLeft_nl(node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n, node_t<skey_t, sval_t>* nR, node_t<skey_t, sval_t>* nRL, int hL, int hRL, int hRR);
    node_t<skey_t, sval_t>* rotateLeftOverRight_nl(node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n, node_t<skey_t, sval_t>* nR, node_t<skey_t, sval_t>* nRL, int hL, int hRR, int hRLR);
    node_t<skey_t, sval_t>* rotateRightOverLeft_nl(node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n, node_t<skey_t, sval_t>* nL, node_t<skey_t, sval_t>* nLR, int hR, int hLL, int hLRL);
public:
    // PAD;
    const int NUM_PROCESSES;
    skey_t KEY_NEG_INFTY;   // key stored in the sentinel root holder
    PAD;
    /** Builds the tree and installs the sentinel holder (never removed). */
    ccavl(const int numProcesses, const skey_t& _KEY_NEG_INFTY)
    : recmgr(new RecMgr(numProcesses, SIGQUIT))
    , NUM_PROCESSES(numProcesses)
    , KEY_NEG_INFTY(_KEY_NEG_INFTY) {
        const int tid = 0;
        initThread(tid);
        recmgr->endOp(tid);
        // sentinel: KEY_NEG_INFTY with a NULL value; data goes under ->right
        root = rbnode_create(tid, KEY_NEG_INFTY, NULL, NULL);
    }
    RecMgr * debugGetRecMgr() {
        return recmgr;
    }
private:
    /** Sequentially frees the whole subtree (children first); returns the
     * number of nodes deallocated.  Single-threaded teardown only. */
    uint64_t dfsDeallocateBottomUp(node_t<skey_t, sval_t> * const node) {
        if (node == NULL) {
            return 0;
        }
        uint64_t sumL = dfsDeallocateBottomUp(node->left);
        uint64_t sumR = dfsDeallocateBottomUp(node->right);
        recmgr->deallocate(0 /* tid */, node);
        return 1 + sumL + sumR;
    }
    /** Leaf phase of the parallel teardown: frees a subtree entirely
     * within the calling OpenMP task's thread. */
    void dfsDeallocateBottomUp_omp_end(node_t<skey_t, sval_t> * const node/*, volatile int * numNodes*/) {
        if (node == NULL) return;
        dfsDeallocateBottomUp_omp_end(node->left/*, numNodes*/);
        dfsDeallocateBottomUp_omp_end(node->right/*, numNodes*/);
        int tid = 0;
#ifdef _OPENMP
        tid = omp_get_thread_num();
#endif
        recmgr->deallocate(tid, node);
        // __sync_fetch_and_add(numNodes, 1);
    }
    /** Parallel teardown: descends 8 levels, then spawns one OpenMP task
     * per subtree so deallocation fans out across threads. */
    void dfsDeallocateBottomUp_omp(node_t<skey_t, sval_t> * const node, int depth/*, volatile int * numNodes*/) {
        if (node == NULL) return;
        if (depth == 8) {
#pragma omp task
            dfsDeallocateBottomUp_omp_end(node/*, numNodes*/);
        } else {
            dfsDeallocateBottomUp_omp(node->left, 1+depth/*, numNodes*/);
            dfsDeallocateBottomUp_omp(node->right, 1+depth/*, numNodes*/);
            int tid = 0;
#ifdef _OPENMP
            tid = omp_get_thread_num();
#endif
            recmgr->deallocate(tid, node);
            // __sync_fetch_and_add(numNodes, 1);
        }
    }
public:
    /** Frees every node in parallel, then destroys the record manager.
     * Must not race with concurrent operations. */
    ~ccavl() {
        std::cout<<"ccavl destructor"<<std::endl;
        //auto numNodes = dfsDeallocateBottomUp(root);
        // volatile int numNodes = 0;
        // omp_set_num_threads(20);
#pragma omp parallel
        {
#pragma omp single
            dfsDeallocateBottomUp_omp(root, 0/*, &numNodes*/);
        }
        // std::cout<<" deallocated "<<numNodes<<std::endl;
        recmgr->printStatus();
        delete recmgr;
    }
    // Idempotent per-tid registration with the record manager.
    void initThread(const int tid) {
        if (init[tid]) return; else init[tid] = !init[tid];
        recmgr->initThread(tid);
    }
    void deinitThread(const int tid) {
        if (!init[tid]) return; else init[tid] = !init[tid];
        recmgr->deinitThread(tid);
    }
    /** Returns the previous value if the key was present, else NULL. */
    sval_t insertIfAbsent(const int tid, skey_t key, sval_t val) {
        return putIfAbsent(tid, root, key, val);
    }
    /** Inserts or overwrites; returns the previous value or NULL. */
    sval_t insertReplace(const int tid, skey_t key, sval_t val) {
        return put(tid, root, key, val);
    }
    /** Returns the value mapped to key, or NULL if absent. */
    sval_t find(const int tid, skey_t key) {
        return get(tid, root, key);
    }
    /** Removes key; returns the previous value or NULL. */
    sval_t erase(const int tid, skey_t key) {
        return remove_node(tid, root, key);
    }
    node_t<skey_t, sval_t> * get_root() {
        return root;
    }
    node_t<skey_t, sval_t> * get_left(node_t<skey_t, sval_t> * curr) {
        return curr->left;
    }
    node_t<skey_t, sval_t> * get_right(node_t<skey_t, sval_t> * curr) {
        return curr->right;
    }
    // Sum of keys of logically-present nodes; nodes with a NULL value are
    // routing nodes (removed keys left in place) and do not contribute.
    long long getKeyChecksum(node_t<skey_t, sval_t> * curr) {
        if (curr == NULL) return 0;
        node_t<skey_t, sval_t> * left = get_left(curr);
        node_t<skey_t, sval_t> * right = get_right(curr);
        return ((long long) ((curr->value != NULL) ? curr->key : 0))
                + getKeyChecksum(left) + getKeyChecksum(right);
    }
    long long getKeyChecksum() {
        // start below the sentinel holder
        return getKeyChecksum(get_right(root));
    }
    // Number of logically-present keys (routing nodes excluded).
    long long getSize(node_t<skey_t, sval_t> * curr) {
        if (curr == NULL) return 0;
        node_t<skey_t, sval_t> * left = get_left(curr);
        node_t<skey_t, sval_t> * right = get_right(curr);
        return (curr->value != NULL) + getSize(left) + getSize(right);
    }
    bool validateStructure() {
        return true;
    }
    long long getSize() {
        return getSize(get_right(root));
    }
    // Physical node count, including routing nodes (excludes the sentinel
    // only when called through the no-argument overload's caller choice;
    // this overload counts whatever subtree it is given).
    long long getSizeInNodes(node_t<skey_t, sval_t> * const curr) {
        if (curr == NULL) return 0;
        return 1 + getSizeInNodes(get_left(curr)) + getSizeInNodes(get_right(curr));
    }
    long long getSizeInNodes() {
        return getSizeInNodes(root);
    }
    void printSummary() {
        recmgr->printStatus();
    }
};
/** Allocates one tree node through the record manager; aborts via
 * setbench_error when the allocator is exhausted. */
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t> * ccavl<skey_t, sval_t, RecMgr>::rb_alloc(const int tid) {
    node_t<skey_t, sval_t> * const n =
            recmgr->template allocate<node_t<skey_t, sval_t> >(tid);
    if (n == NULL) {
        setbench_error("out of memory");
    }
    return n;
}
/** Builds a fresh leaf: no children, height 1, version 0, own mutex. */
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rbnode_create(const int tid, skey_t key, sval_t value, node_t<skey_t, sval_t>* parent) {
    node_t<skey_t, sval_t>* n = rb_alloc(tid);
    n->key = key;
    n->value = value;
    n->left = NULL;
    n->right = NULL;
    n->parent = parent;
    n->height = 1;
    n->changeOVL = 0;
    if (mutex_init(&(n->lock)) != 0) {
        printf("\n mutex init failed\n");
    }
    return n;
}
/** True when either the grow or the shrink lock bit is set. */
static int isChanging(version_t ovl) {
    return (ovl & (OVLGrowLockMask | OVLShrinkLockMask)) ? 1 : 0;
}
/** True when the version is the terminal UnlinkedOVL sentinel. */
static int isUnlinked(version_t ovl) {
    return (ovl == UnlinkedOVL) ? 1 : 0;
}
/** True when the node is mid-shrink or already unlinked; readers must
 * not descend through such a node. */
static int isShrinkingOrUnlinked(version_t ovl) {
    return (ovl & (UnlinkedOVL | OVLShrinkLockMask)) ? 1 : 0;
}
/** True when any change (grow or shrink) is in progress, or the node is
 * unlinked. */
static int isChangingOrUnlinked(version_t ovl) {
    return (ovl & (UnlinkedOVL | OVLGrowLockMask | OVLShrinkLockMask)) ? 1 : 0;
}
/** True when the version changed in a way that invalidates a reader:
 * grow lock/count bits are masked out because growth never hurts an
 * in-flight traversal. */
static int hasShrunkOrUnlinked(version_t orig, version_t current) {
    version_t diff = orig ^ current;
    return (diff & ~(OVLGrowLockMask | OVLGrowCountMask)) ? 1 : 0;
}
/*
static int hasChangedOrUnlinked(version_t orig, version_t current) {
return orig != current;
}
*/
/** Sets the grow-lock bit; the node must currently be quiescent. */
static version_t beginGrow(version_t ovl) {
    assert(!isChangingOrUnlinked(ovl));
    version_t locked = ovl | OVLGrowLockMask;
    return locked;
}
/** Releases a grow: bumping the grow counter simultaneously clears the
 * grow-lock bit (the add carries through it).  Counter overflow spills
 * into the shrink count, which is fine. */
static version_t endGrow(version_t ovl) {
    assert(!isChangingOrUnlinked(ovl));
    version_t step = 1L << OVLGrowCountShift;
    return ovl + step;
}
/** Sets the shrink-lock bit; the node must currently be quiescent. */
static version_t beginShrink(version_t ovl) {
    assert(!isChangingOrUnlinked(ovl));
    version_t locked = ovl | OVLShrinkLockMask;
    return locked;
}
/** Releases a shrink by bumping the shrink counter (the add carries
 * through, and thus clears, the shrink-lock bit). */
static version_t endShrink(version_t ovl) {
    assert(!isChangingOrUnlinked(ovl));
    version_t step = 1L << OVLShrinkCountShift;
    return ovl + step;
}
//***************************************************
/** Reads the child pointer selected by dir (LEFT or RIGHT). */
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::get_child(node_t<skey_t, sval_t>* curr, char dir) {
    if (dir == LEFT) {
        return curr->left;
    }
    return curr->right;
}
// node should be locked
/** Installs new_node into the dir slot of curr.  Caller holds curr's
 * lock; the slot must be empty (this is insert-only). */
template <typename skey_t, typename sval_t, class RecMgr>
void ccavl<skey_t, sval_t, RecMgr>::setChild(node_t<skey_t, sval_t>* curr, char dir, node_t<skey_t, sval_t>* new_node) {
    if (dir != LEFT) {
        assert(curr->right == NULL);
        curr->right = new_node;
    } else {
        assert(curr->left == NULL);
        curr->left = new_node;
    }
}
//////// per-node blocking
/** Blocks until the change described by ovl (a grow/shrink in progress
 * on curr) has completed.  First spins up to SPIN_COUNT iterations
 * watching the volatile changeOVL; if the change is still in flight,
 * falls back to acquiring curr's lock — the changer holds that lock, so
 * obtaining it proves the change is over.
 *
 * Fix: the loop increment had been commented out (`/ *++tries* /`),
 * which made `tries < SPIN_COUNT` permanently true: the spin was
 * unbounded and the documented lock fallback below was unreachable dead
 * code, so a preempted changer could cause unbounded busy-waiting.
 * Restoring the increment bounds the spin as SPIN_COUNT intends.
 */
template <typename skey_t, typename sval_t, class RecMgr>
void ccavl<skey_t, sval_t, RecMgr>::waitUntilChangeCompleted(node_t<skey_t, sval_t>* curr, version_t ovl) {
    int tries;
    if (!isChanging(ovl)) {
        return;
    }
    for (tries = 0; tries < SPIN_COUNT; ++tries) {
        if (curr->changeOVL != ovl) {
            return;
        }
    }
    // spin and yield failed, use the nuclear option
    mutex_lock(&(curr->lock));
    // we can't have gotten the lock unless the shrink was over
    mutex_unlock(&(curr->lock));
    assert(curr->changeOVL != ovl);
}
//////// node access functions
/** Cached height of a subtree; NULL means an empty subtree (height 0). */
template <typename skey_t, typename sval_t, class RecMgr>
int ccavl<skey_t, sval_t, RecMgr>::height(volatile node_t<skey_t, sval_t>* curr) {
    if (curr == NULL) {
        return 0;
    }
    return curr->height;
}
/** Maps the in-tree SpecialNull sentinel back to a plain NULL for the
 * caller.  SpecialRetry must never reach this layer. */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::decodeNull(sval_t vOpt) {
    assert(vOpt != SpecialRetry);
    if (vOpt == (sval_t) SpecialNull) {
        return (sval_t) NULL;
    }
    return vOpt;
}
/** Maps a user-supplied NULL to the in-tree SpecialNull sentinel so a
 * stored null value is distinguishable from key absence. */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::encodeNull(sval_t v) {
    if (v == (sval_t) NULL) {
        return (sval_t) SpecialNull;
    }
    return v;
}
//////// search
/** Returns either a value or SpecialNull, if present, or null, if absent. */
/** Lookup retry loop starting at the sentinel holder `tree`: validates
 * the real root (tree->right) optimistically, then delegates the
 * descent to attemptGet, restarting on SpecialRetry.
 */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::getImpl(node_t<skey_t, sval_t>* tree, skey_t key) {
    node_t<skey_t, sval_t>* right;
    version_t ovl;
    //long rightCmp;
    sval_t vo;
    while (1) {
        right = (node_t<skey_t, sval_t>*) tree->right;
        if (right == NULL) {
            // empty tree
            return NULL;
        } else {
            //rightCmp = key - right->key;
            if (key == right->key) {
                // who cares how we got here
                return right->value;
            }
            // read the version BEFORE the re-read of tree->right below;
            // this ordering is what makes the re-read meaningful
            ovl = right->changeOVL;
            if (isShrinkingOrUnlinked(ovl)) {
                waitUntilChangeCompleted(right, ovl);
                // RETRY
            } else if (right == tree->right) {
                // the reread of .right is the one protected by our read of ovl
                vo = attemptGet(key, right, (key < right->key ? LEFT : RIGHT), ovl);
                if (vo != SpecialRetry) {
                    return vo;
                }
                // else RETRY
            }
        }
    }
}
// return a value
/** Public lookup: pins an epoch guard for safe reclamation, searches,
 * and decodes the stored-null sentinel.  Returns NULL when absent. */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::get(const int tid, node_t<skey_t, sval_t>* tree, skey_t key) {
    auto guard = recmgr->getGuard(tid, true);
    return decodeNull(getImpl(tree, key));
}
/** Recursive optimistic descent: moves from `curr` toward `key` in the
 * direction dirToC.  nodeOVL is the version of curr observed by the
 * caller; if curr shrinks (is rotated or unlinked) after that
 * observation, SpecialRetry is returned so the caller restarts from a
 * node whose observation is still valid.
 */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::attemptGet(skey_t key,
        node_t<skey_t, sval_t>* curr,
        char dirToC,
        version_t nodeOVL) {
    node_t<skey_t, sval_t>* child;
    //long childCmp;
    version_t childOVL;
    sval_t vo;
    while (1) {
        child = get_child(curr, dirToC);
        if (child == NULL) {
            if (hasShrunkOrUnlinked(nodeOVL, curr->changeOVL)) {
                return (sval_t) SpecialRetry;
            }
            // Node is not present. Read of node.child occurred while
            // parent.child was valid, so we were not affected by any
            // shrinks.
            return NULL;
        } else {
            //childCmp = key - child->key;
            if (key == child->key) {
                // how we got here is irrelevant
                return child->value;
            }
            // child is non-null
            childOVL = child->changeOVL;
            if (isShrinkingOrUnlinked(childOVL)) {
                waitUntilChangeCompleted(child, childOVL);
                if (hasShrunkOrUnlinked(nodeOVL, curr->changeOVL)) {
                    return (sval_t) SpecialRetry;
                }
                // else RETRY
            } else if (child != get_child(curr, dirToC)) {
                // this .child is the one that is protected by childOVL
                if (hasShrunkOrUnlinked(nodeOVL, curr->changeOVL)) {
                    return (sval_t) SpecialRetry;
                }
                // else RETRY
            } else {
                if (hasShrunkOrUnlinked(nodeOVL, curr->changeOVL)) {
                    return (sval_t) SpecialRetry;
                }
                // At this point we know that the traversal our parent took
                // to get to node is still valid. The recursive
                // implementation will validate the traversal from node to
                // child, so just prior to the nodeOVL validation both
                // traversals were definitely okay. This means that we are
                // no longer vulnerable to node shrinks, and we don't need
                // to validate nodeOVL any more.
                vo = attemptGet(key, child, (key < child->key ? LEFT : RIGHT), childOVL);
                if (vo != (sval_t) SpecialRetry) {
                    return vo;
                }
                // else RETRY
            }
        }
    }
}
/** Decides whether the requested update mode applies, given the
 * previous (encoded) value at the key. */
template <typename skey_t, typename sval_t, class RecMgr>
int ccavl<skey_t, sval_t, RecMgr>::shouldUpdate(int func, sval_t prev, sval_t expected) {
    if (func == UpdateAlways) {
        return 1;
    }
    if (func == UpdateIfAbsent) {
        return prev == NULL;
    }
    if (func == UpdateIfPresent) {
        return prev != NULL;
    }
    // UpdateIfEq and anything else: compare against the expected value
    return prev == expected; // TODO: use .equals
}
// return previous value or NULL
/** Insert only when absent; returns the previous value or NULL. */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::putIfAbsent(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, sval_t value) {
    auto guard = recmgr->getGuard(tid);
    return decodeNull(update(tid, tree, key, UpdateIfAbsent, NULL, encodeNull(value)));
}
/** Unconditional insert-or-replace; returns the previous value or NULL. */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::put(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, sval_t value) {
    auto guard = recmgr->getGuard(tid);
    return decodeNull(update(tid, tree, key, UpdateAlways, NULL, encodeNull(value)));
}
/** Removal is an update to a NULL value; returns the previous value or
 * NULL if the key was absent. */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::remove_node(const int tid, node_t<skey_t, sval_t>* tree, skey_t key) {
    auto guard = recmgr->getGuard(tid);
    return decodeNull(update(tid, tree, key, UpdateAlways, NULL, NULL));
}
/** Installs the first real node as tree->right under the sentinel's
 * lock.  Returns 1 on success, 0 if another thread won the race. */
template <typename skey_t, typename sval_t, class RecMgr>
int ccavl<skey_t, sval_t, RecMgr>::attemptInsertIntoEmpty(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, sval_t vOpt) {
    int installed = 0;
    mutex_lock(&(tree->lock));
    if (tree->right == NULL) {
        tree->right = rbnode_create(tid, key, vOpt, tree);
        tree->height = 2;
        installed = 1;
    }
    mutex_unlock(&(tree->lock));
    return installed;
}
/** If successful returns the non-null previous value, SpecialNull for a
* null previous value, or null if not previously in the map.
* The caller should retry if this method returns SpecialRetry.
*/
/** Recursive optimistic descent for writes, mirroring attemptGet.
 * Returns the previous encoded value, NULL if absent, or SpecialRetry
 * when the caller's observation of curr (nodeOVL) was invalidated.
 */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::attemptUpdate(
        const int tid,
        skey_t key,
        int func,
        sval_t expected,
        sval_t newValue,
        node_t<skey_t, sval_t>* parent,
        node_t<skey_t, sval_t>* curr,
        version_t nodeOVL) {
    // As the search progresses there is an implicit min and max assumed for the
    // branch of the tree rooted at node. A left rotation of a node x results in
    // the range of keys in the right branch of x being reduced, so if we are at a
    // node and we wish to traverse to one of the branches we must make sure that
    // the node has not undergone a rotation since arriving from the parent.
    //
    // A rotation of node can't screw us up once we have traversed to node's
    // child, so we don't need to build a huge transaction, just a chain of
    // smaller read-only transactions.
    //long cmp;
    char dirToC;
    assert(parent != curr);
    assert(nodeOVL != UnlinkedOVL);
    //cmp = key - curr->key;
    if (key == curr->key) {
        // found the key: hand off to the node-level update
        return attemptNodeUpdate(tid, func, expected, newValue, parent, curr);
    }
    dirToC = key < curr->key ? LEFT : RIGHT;
    while (1) {
        node_t<skey_t, sval_t>* child = get_child(curr, dirToC);
        if (hasShrunkOrUnlinked(nodeOVL, curr->changeOVL)) {
            return (sval_t) SpecialRetry;
        }
        if (child == NULL) {
            // key is not present
            if (newValue == NULL) {
                // Removal is requested. Read of node.child occurred
                // while parent.child was valid, so we were not affected
                // by any shrinks.
                return NULL;
            } else {
                // Update will be an insert.
                int success;
                node_t<skey_t, sval_t>* damaged;
                mutex_lock(&(curr->lock));
                {
                    // Validate that we haven't been affected by past
                    // rotations. We've got the lock on node, so no future
                    // rotations can mess with us.
                    if (hasShrunkOrUnlinked(nodeOVL, curr->changeOVL)) {
                        mutex_unlock(&(curr->lock));
                        return (sval_t) SpecialRetry;
                    }
                    if (get_child(curr, dirToC) != NULL) {
                        // Lost a race with a concurrent insert. No need
                        // to back up to the parent, but we must RETRY in
                        // the outer loop of this method.
                        success = 0;
                        damaged = NULL;
                    } else {
                        // We're valid. Does the user still want to
                        // perform the operation?
                        if (!shouldUpdate(func, NULL, expected)) {
                            mutex_unlock(&(curr->lock));
                            return NULL;
                        }
                        // Create a new leaf
                        setChild(curr, dirToC, rbnode_create(tid, key, newValue, curr));
                        success = 1;
                        // attempt to fix node.height while we've still got
                        // the lock
                        damaged = fixHeight_nl(curr);
                    }
                }
                mutex_unlock(&(curr->lock));
                if (success) {
                    // repair any remaining damage bottom-up, without locks held
                    fixHeightAndRebalance(tid, damaged);
                    return NULL;
                }
                // else RETRY
            }
        } else {
            // non-null child
            version_t childOVL = child->changeOVL;
            if (isShrinkingOrUnlinked(childOVL)) {
                waitUntilChangeCompleted(child, childOVL);
                // RETRY
            } else if (child != get_child(curr, dirToC)) {
                // this second read is important, because it is protected
                // by childOVL
                // RETRY
            } else {
                // validate the read that our caller took to get to node
                if (hasShrunkOrUnlinked(nodeOVL, curr->changeOVL)) {
                    return (sval_t) SpecialRetry;
                }
                // At this point we know that the traversal our parent took
                // to get to node is still valid. The recursive
                // implementation will validate the traversal from node to
                // child, so just prior to the nodeOVL validation both
                // traversals were definitely okay. This means that we are
                // no longer vulnerable to node shrinks, and we don't need
                // to validate nodeOVL any more.
                sval_t vo = attemptUpdate(tid, key, func,
                        expected, newValue, curr, child, childOVL);
                if (vo != (sval_t) SpecialRetry) {
                    return vo;
                }
                // else RETRY
            }
        }
    }
}
/** Retry loop for all writes, starting at the sentinel holder `tree`.
 * Handles the empty tree inline; otherwise validates the real root's
 * version and delegates to attemptUpdate.  Returns the previous encoded
 * value or NULL.
 */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::update(const int tid, node_t<skey_t, sval_t>* tree, skey_t key, int func, sval_t expected, sval_t newValue) {
    while (1) {
        node_t<skey_t, sval_t>* right = tree->right;
        if (right == NULL) {
            // key is not present
            if (!shouldUpdate(func, NULL, expected) ||
                    newValue == NULL ||
                    attemptInsertIntoEmpty(tid, tree, key, newValue)) {
                // nothing needs to be done, or we were successful, prev value is Absent
                return NULL;
            }
            // else RETRY
        } else {
            // read the version BEFORE re-reading tree->right below
            version_t ovl = right->changeOVL;
            if (isShrinkingOrUnlinked(ovl)) {
                waitUntilChangeCompleted(right, ovl);
                // RETRY
            } else if (right == tree->right) {
                // this is the protected .right
                sval_t vo = attemptUpdate(tid, key, func,
                        expected, newValue, tree, right, ovl);
                if (vo != (sval_t) SpecialRetry) {
                    return vo;
                }
                // else RETRY
            }
        }
    }
}
/** parent will only be used for unlink, update can proceed even if parent
* is stale.
*/
/** Applies the update to curr, whose key equals the search key.
 * A removal (newValue == NULL) of a node with at most one child is a
 * physical unlink and needs the parent's lock; removal of a two-child
 * node just NULLs the value, leaving a routing node.  Everything else
 * is an in-place value swap under curr's lock.  Returns the previous
 * encoded value, or SpecialRetry if the caller must restart.
 */
template <typename skey_t, typename sval_t, class RecMgr>
sval_t ccavl<skey_t, sval_t, RecMgr>::attemptNodeUpdate(
        const int tid,
        int func,
        sval_t expected,
        sval_t newValue,
        node_t<skey_t, sval_t>* parent,
        node_t<skey_t, sval_t>* curr) {
    sval_t prev;
    if (newValue == NULL) {
        // removal
        if (curr->value == NULL) {
            // This node is already removed, nothing to do.
            return NULL;
        }
    }
    if (newValue == NULL && (curr->left == NULL || curr->right == NULL)) {
        // potential unlink, get ready by locking the parent
        node_t<skey_t, sval_t>* damaged;
        mutex_lock(&(parent->lock));
        {
            // parent must still be linked and still be curr's parent
            if (isUnlinked(parent->changeOVL) || curr->parent != parent) {
                mutex_unlock(&(parent->lock));
                return (sval_t) SpecialRetry;
            }
            mutex_lock(&(curr->lock));
            {
                prev = curr->value;
                if (prev == NULL || !shouldUpdate(func, prev, expected)) {
                    // nothing to do
                    mutex_unlock(&(curr->lock));
                    mutex_unlock(&(parent->lock));
                    return prev;
                }
                if (!attemptUnlink_nl(tid, parent, curr)) {
                    mutex_unlock(&(curr->lock));
                    mutex_unlock(&(parent->lock));
                    return (sval_t) SpecialRetry;
                }
            }
            mutex_unlock(&(curr->lock));
            // try to fix the parent while we've still got the lock
            damaged = fixHeight_nl(parent);
        }
        mutex_unlock(&(parent->lock));
        fixHeightAndRebalance(tid, damaged);
        return prev;
    } else {
        // potential update (including remove-without-unlink)
        mutex_lock(&(curr->lock));
        {
            // regular version changes don't bother us
            if (isUnlinked(curr->changeOVL)) {
                mutex_unlock(&(curr->lock));
                return (sval_t) SpecialRetry;
            }
            prev = curr->value;
            if (!shouldUpdate(func, prev, expected)) {
                mutex_unlock(&(curr->lock));
                return prev;
            }
            // retry if we now detect that unlink is possible
            if (newValue == NULL && (curr->left == NULL || curr->right == NULL)) {
                mutex_unlock(&(curr->lock));
                return (sval_t) SpecialRetry;
            }
            // update in-place
            curr->value = newValue;
            mutex_unlock(&(curr->lock));
            return prev;
        }
        // NOTE(review): unreachable — every path in the block above returns
        // after unlocking; this unlock is dead code.
        mutex_unlock(&(curr->lock));
    }
}
/** Does not adjust the size or any heights. */
/** Splices curr (at most one child) out from under parent.  Caller
 * holds both parent's and curr's locks (the _nl suffix).  Returns 0 if
 * the structural preconditions no longer hold, 1 on success.
 */
template <typename skey_t, typename sval_t, class RecMgr>
int ccavl<skey_t, sval_t, RecMgr>::attemptUnlink_nl(const int tid, node_t<skey_t, sval_t>* parent, node_t<skey_t, sval_t>* curr) {
    node_t<skey_t, sval_t>* parentL;
    node_t<skey_t, sval_t>* parentR;
    node_t<skey_t, sval_t>* left;
    node_t<skey_t, sval_t>* right;
    node_t<skey_t, sval_t>* splice;
    // assert (Thread.holdsLock(parent));
    // assert (Thread.holdsLock( node_t<skey_t, sval_t>*));
    assert(!isUnlinked(parent->changeOVL));
    parentL = (node_t<skey_t, sval_t>*) parent->left;
    parentR = (node_t<skey_t, sval_t>*) parent->right;
    if (parentL != curr && parentR != curr) {
        // node is no longer a child of parent
        return 0;
    }
    assert(!isUnlinked(curr->changeOVL));
    assert(parent == curr->parent);
    left = curr->left;
    right = curr->right;
    if (left != NULL && right != NULL) {
        // splicing is no longer possible
        return 0;
    }
    // splice is curr's only child (or NULL if curr is a leaf)
    splice = left != NULL ? left : right;
    assert(splice != curr);
    if (parentL == curr) {
        parent->left = splice;
    } else {
        parent->right = splice;
    }
    // curr is now unreachable from the tree; hand it to the record
    // manager for deferred (epoch-safe) reclamation
    //std::cout<<"calling retire("<<tid<<", "<<curr<<")"<<std::endl;
    recmgr->retire(tid, curr);
    if (splice != NULL) {
        mutex_lock(&(splice->lock));
        splice->parent = parent;
        mutex_unlock(&(splice->lock));
    }
    // memory barriers around the version write so optimistic readers see
    // the UnlinkedOVL sentinel no later than the structural changes above
    lock_mb();
    curr->changeOVL = UnlinkedOVL;
    curr->value = NULL;
    lock_mb();
    //printf("unlink %p %p %p\n", parent, node, splice);
    // NOTE: this is a hack to allow deeply nested routines to be able to
    // see the root of the tree. This is necessary to allow rp_free
    // to be passed the tree lock rather than the node lock.
    // My_Tree is a thread local variable that is set by the
    // public interface on each method call
    //
    //rp_free(My_Tree->lock, rbnode_free, node);
    // FIX THIS: not doing garbage collection
    return 1;
}
//////////////// tree balance and height info repair
/** Classifies a node without holding its lock: returns UnlinkRequired,
 * RebalanceRequired, NothingRequired, or the corrected height the node
 * should have (a non-negative value).  The reads between the "atomic"
 * markers are unsynchronized; the scheme tolerates a torn snapshot
 * because whoever changes a node promises to repair it (see below).
 */
template <typename skey_t, typename sval_t, class RecMgr>
int ccavl<skey_t, sval_t, RecMgr>::nodeCondition(node_t<skey_t, sval_t>* curr) {
    // Begin atomic.
    int hN;
    int hL0;
    int hR0;
    int hNRepl;
    int bal;
    node_t<skey_t, sval_t>* nL = (node_t<skey_t, sval_t>*) curr->left;
    node_t<skey_t, sval_t>* nR = (node_t<skey_t, sval_t>*) curr->right;
    if ((nL == NULL || nR == NULL) && curr->value == NULL) {
        // a routing node (deleted value) with at most one child can be spliced out
        return UnlinkRequired;
    }
    hN = curr->height;
    hL0 = height(nL);
    hR0 = height(nR);
    // End atomic. Since any thread that changes a node promises to fix
    // it, either our read was consistent (and a NothingRequired conclusion
    // is correct) or someone else has taken responsibility for either node
    // or one of its children.
    hNRepl = 1 + MAX(hL0, hR0);
    bal = hL0 - hR0;
    if (bal < -1 || bal > 1) {
        return RebalanceRequired;
    }
    return hN != hNRepl ? hNRepl : NothingRequired;
}
/** Walks upward from a damaged node, fixing heights and performing
 * rotations until no repair is needed.  Terminates at the sentinel
 * holder (its parent is NULL), which is never rotated.
 */
template <typename skey_t, typename sval_t, class RecMgr>
void ccavl<skey_t, sval_t, RecMgr>::fixHeightAndRebalance(const int tid, node_t<skey_t, sval_t>* curr) {
    while (curr != NULL && curr->parent != NULL) {
        int condition = nodeCondition(curr);
        if (condition == NothingRequired || isUnlinked(curr->changeOVL)) {
            // nothing to do, or no point in fixing this node
            return;
        }
        if (condition != UnlinkRequired && condition != RebalanceRequired) {
            // just a stale height: fix it under curr's lock alone
            node_t<skey_t, sval_t>* new_node;
            mutex_lock(&(curr->lock));
            {
                new_node = fixHeight_nl(curr);
            }
            mutex_unlock(&(curr->lock));
            // continue repairing from whatever node we damaged (the parent)
            curr = new_node;
        } else {
            // unlink or rotation: needs the parent's lock first (lock
            // order is always parent before child)
            node_t<skey_t, sval_t>* nParent = (node_t<skey_t, sval_t>*) curr->parent;
            mutex_lock(&(nParent->lock));
            {
                if (!isUnlinked(nParent->changeOVL) && curr->parent == nParent) {
                    node_t<skey_t, sval_t>* new_node;
                    mutex_lock(&(curr->lock));
                    {
                        new_node = rebalance_nl(tid, nParent, curr);
                    }
                    mutex_unlock(&(curr->lock));
                    curr = new_node;
                }
                // else RETRY
            }
            mutex_unlock(&(nParent->lock));
        }
    }
}
/** Attempts to fix the height of a (locked) damaged node, returning the
* lowest damaged node for which this thread is responsible. Returns null
* if no more repairs are needed.
*/
/** Fixes the height of the (locked) node curr when possible.  Returns
 * the lowest node that still needs repair by this thread, or NULL when
 * no further repairs are required. */
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::fixHeight_nl(node_t<skey_t, sval_t>* curr) {
    const int c = nodeCondition(curr);
    if (c == RebalanceRequired || c == UnlinkRequired) {
        // can't repair with only this lock; hand curr back to the caller
        return curr;
    }
    if (c == NothingRequired) {
        // Any future damage to this node is not our responsibility.
        return NULL;
    }
    // c is the corrected height; writing it may damage the parent,
    // which we can't fix while holding only curr's lock
    curr->height = c;
    return (node_t<skey_t, sval_t>*) curr->parent;
}
/** nParent and n must be locked on entry. Returns a damaged node, or null
* if no more rebalancing is necessary.
*/
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rebalance_nl(const int tid, node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n) {
    int hN;
    int hL0;
    int hR0;
    int hNRepl;
    int bal;
    node_t<skey_t, sval_t>* tainted;
    node_t<skey_t, sval_t>* nL = (node_t<skey_t, sval_t>*) n->left;
    node_t<skey_t, sval_t>* nR = (node_t<skey_t, sval_t>*) n->right;
    // a routing node with at most one child: unlink instead of rotating
    if ((nL == NULL || nR == NULL) && n->value == NULL) {
        if (attemptUnlink_nl(tid, nParent, n)) {
            // attempt to fix nParent.height while we've still got the lock
            return fixHeight_nl(nParent);
        } else {
            // retry needed for n
            return n;
        }
    }
    hN = n->height;
    hL0 = height(nL);
    hR0 = height(nR);
    hNRepl = 1 + MAX(hL0, hR0);
    bal = hL0 - hR0;
    if (bal > 1) {
        // left-heavy: lock the left child and rotate toward the right
        mutex_lock(&(nL->lock));
        tainted = rebalanceToRight_nl(nParent, n, nL, hR0);
        mutex_unlock(&(nL->lock));
        return tainted;
    } else if (bal < -1) {
        // right-heavy: lock the right child and rotate toward the left
        mutex_lock(&(nR->lock));
        tainted = rebalanceToLeft_nl(nParent, n, nR, hL0);
        mutex_unlock(&(nR->lock));
        return tainted;
    } else if (hNRepl != hN) {
        // we've got more than enough locks to do a height change, no need to
        // trigger a retry
        n->height = hNRepl;
        // nParent is already locked, let's try to fix it too
        return fixHeight_nl(nParent);
    } else {
        // nothing to do
        return NULL;
    }
}
/** n's left subtree is too tall.  Caller holds locks on nParent, n and
 * nL.  Chooses between a single right rotation, a double rotation, or
 * rebalancing nL first.  Returns the node still needing repair (or
 * NULL).
 */
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rebalanceToRight_nl(node_t<skey_t, sval_t>* nParent, node_t<skey_t, sval_t>* n,
        node_t<skey_t, sval_t>* nL, int hR0) {
    node_t<skey_t, sval_t>* result;
    // L is too large, we will rotate-right. If L.R is taller
    // than L.L, then we will first rotate-left L.
    {
        int hL = nL->height;
        if (hL - hR0 <= 1) {
            return n; // retry
        } else {
            node_t<skey_t, sval_t>* nLR = (node_t<skey_t, sval_t>*) nL->right;
            int hLL0 = height((node_t<skey_t, sval_t>*) nL->left);
            int hLR0 = height(nLR);
            if (hLL0 >= hLR0) {
                // rotate right based on our snapshot of hLR
                if (nLR != NULL) mutex_lock(&(nLR->lock));
                result = rotateRight_nl(nParent, n, nL, nLR, hR0, hLL0, hLR0);
                if (nLR != NULL) mutex_unlock(&(nLR->lock));
                return result;
            } else {
                mutex_lock(&(nLR->lock));
                {
                    // If our hLR snapshot is incorrect then we might
                    // actually need to do a single rotate-right on n.
                    int hLR = nLR->height;
                    if (hLL0 >= hLR) {
                        result = rotateRight_nl(nParent, n, nL, nLR, hR0, hLL0, hLR);
                        mutex_unlock(&(nLR->lock));
                        return result;
                    } else {
                        // If the underlying left balance would not be
                        // sufficient to actually fix n.left, then instead
                        // of rolling it into a double rotation we do it on
                        // it's own. This may let us avoid rotating n at
                        // all, but more importantly it avoids the creation
                        // of damaged nodes that don't have a direct
                        // ancestry relationship. The recursive call to
                        // rebalanceToRight_nl in this case occurs after we
                        // release the lock on nLR.
                        int hLRL = height((node_t<skey_t, sval_t>*) nLR->left);
                        int b = hLL0 - hLRL;
                        if (b >= -1 && b <= 1) {
                            // nParent.child.left won't be damaged after a double rotation
                            result = rotateRightOverLeft_nl(nParent, n, nL, nLR,
                                    hR0, hLL0, hLRL);
                            mutex_unlock(&(nLR->lock));
                            return result;
                        }
                    }
                }
                // focus on nL, if necessary n will be balanced later
                result = rebalanceToLeft_nl(n, nL, nLR, hLL0);
                mutex_unlock(&(nLR->lock));
                return result;
            }
        }
    }
}
/** Mirror of rebalanceToRight_nl: n's right subtree is too tall.
 * Caller holds locks on nParent, n and nR.  Returns the node still
 * needing repair (or NULL).
 */
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rebalanceToLeft_nl(node_t<skey_t, sval_t>* nParent,
        node_t<skey_t, sval_t>* n,
        node_t<skey_t, sval_t>* nR,
        int hL0) {
    node_t<skey_t, sval_t>* result;
    {
        int hR = nR->height;
        if (hL0 - hR >= -1) {
            return n; // retry
        } else {
            node_t<skey_t, sval_t>* nRL = (node_t<skey_t, sval_t>*) nR->left;
            int hRL0 = height(nRL);
            int hRR0 = height((node_t<skey_t, sval_t>*) nR->right);
            if (hRR0 >= hRL0) {
                // single left rotation based on our snapshot of hRL
                if (nRL != NULL) mutex_lock(&(nRL->lock));
                result = rotateLeft_nl(nParent, n, nR, nRL, hL0, hRL0, hRR0);
                if (nRL != NULL) mutex_unlock(&(nRL->lock));
                return result;
            } else {
                mutex_lock(&(nRL->lock));
                {
                    // re-check hRL under nRL's lock
                    int hRL = nRL->height;
                    if (hRR0 >= hRL) {
                        result = rotateLeft_nl(nParent, n, nR, nRL, hL0, hRL, hRR0);
                        mutex_unlock(&(nRL->lock));
                        return result;
                    } else {
                        int hRLR = height((node_t<skey_t, sval_t>*) nRL->right);
                        int b = hRR0 - hRLR;
                        if (b >= -1 && b <= 1) {
                            // double rotation leaves nParent.child.right undamaged
                            result = rotateLeftOverRight_nl(nParent, n,
                                    nR, nRL, hL0, hRR0, hRLR);
                            mutex_unlock(&(nRL->lock));
                            return result;
                        }
                    }
                }
                // focus on nR; if necessary n will be balanced later
                result = rebalanceToRight_nl(n, nR, nRL, hRR0);
                mutex_unlock(&(nRL->lock));
                return result;
            }
        }
    }
}
/** Single right rotation of n with its left child nL, under nParent.
 * Caller holds locks on nParent, n, nL, and nLR (when non-NULL).
 * Concurrent optimistic readers are fenced by the shrink/grow OVL
 * protocol: n shrinks (its key range narrows), nL grows.  Returns the
 * node still needing repair (or NULL).
 */
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rotateRight_nl(node_t<skey_t, sval_t>* nParent,
        node_t<skey_t, sval_t>* n,
        node_t<skey_t, sval_t>* nL,
        node_t<skey_t, sval_t>* nLR,
        int hR,
        int hLL,
        int hLR) {
    int hNRepl;
    int balN;
    int balL;
    version_t nodeOVL = n->changeOVL;
    version_t leftOVL = nL->changeOVL;
    node_t<skey_t, sval_t>* nPL = (node_t<skey_t, sval_t>*) nParent->left;
    // publish lock bits before touching any pointers
    n->changeOVL = beginShrink(nodeOVL);
    nL->changeOVL = beginGrow(leftOVL);
    lock_mb();
    // Down links originally to shrinking nodes should be the last to change,
    // because if we change them early a search might bypass the OVL that
    // indicates its invalidity. Down links originally from shrinking nodes
    // should be the first to change, because we have complete freedom when to
    // change them. s/down/up/ and s/shrink/grow/ for the parent links.
    n->left = nLR;
    nL->right = n;
    if (nPL == n) {
        nParent->left = nL;
    } else {
        nParent->right = nL;
    }
    nL->parent = nParent;
    n->parent = nL;
    if (nLR != NULL) {
        nLR->parent = n;
    }
    // fix up heights links
    hNRepl = 1 + MAX(hLR, hR);
    n->height = hNRepl;
    nL->height = 1 + MAX(hLL, hNRepl);
    // release the version locks (grow before shrink, reverse of acquire)
    nL->changeOVL = endGrow(leftOVL);
    n->changeOVL = endShrink(nodeOVL);
    lock_mb();
    // We have damaged nParent, n (now parent.child.right), and nL (now
    // parent.child). n is the deepest. Perform as many fixes as we can
    // with the locks we've got.
    // We've already fixed the height for n, but it might still be outside
    // our allowable balance range. In that case a simple fixHeight_nl
    // won't help.
    balN = hLR - hR;
    if (balN < -1 || balN > 1) {
        // we need another rotation at n
        return n;
    }
    // we've already fixed the height at nL, do we need a rotation here?
    balL = hLL - hNRepl;
    if (balL < -1 || balL > 1) {
        return nL;
    }
    // try to fix the parent height while we've still got the lock
    return fixHeight_nl(nParent);
}
// Mirror image of rotateRight_nl: left-rotate n about its right child nR.
// The caller holds the locks on nParent, n, and nR ("_nl" = no locks are
// acquired here).  hL, hRL, hRR are the heights of n->left, nRL
// (= nR->left), and nR->right as observed by the caller.  Returns the node
// that still needs rebalancing (n or nR), or fixHeight_nl(nParent).
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rotateLeft_nl(node_t<skey_t, sval_t>* nParent,
    node_t<skey_t, sval_t>* n,
    node_t<skey_t, sval_t>* nR,
    node_t<skey_t, sval_t>* nRL,
    int hL,
    int hRL,
    int hRR) {
    int hNRepl;     // replacement height of n after the rotation
    int balN;       // resulting balance factor at n
    int balR;       // resulting balance factor at nR
    version_t nodeOVL = n->changeOVL;
    version_t rightOVL = nR->changeOVL;
    // remember which side of nParent currently points at n, before relinking
    node_t<skey_t, sval_t>* nPL = (node_t<skey_t, sval_t>*) nParent->left;
    // publish "change in progress" OVLs so concurrent optimistic readers
    // can detect this rotation and revalidate/retry
    n->changeOVL = beginShrink(nodeOVL);
    nR->changeOVL = beginGrow(rightOVL);
    lock_mb();
    // link updates follow the same ordering rules spelled out in
    // rotateRight_nl: links into shrinking nodes change last, links out of
    // shrinking nodes change first
    n->right = nRL;
    nR->left = n;
    if (nPL == n) {
        nParent->left = nR;
    } else {
        nParent->right = nR;
    }
    nR->parent = nParent;
    n->parent = nR;
    if (nRL != NULL) {
        nRL->parent = n;
    }
    // fix up heights
    hNRepl = 1 + MAX(hL, hRL);
    n->height = hNRepl;
    nR->height = 1 + MAX(hNRepl, hRR);
    // publish the completed OVLs; in-flight optimistic readers will retry
    nR->changeOVL = endGrow(rightOVL);
    n->changeOVL = endShrink(nodeOVL);
    lock_mb();
    // we've fixed n's height, but n may still be outside the allowable
    // balance range, in which case another rotation at n is needed
    balN = hRL - hL;
    if (balN < -1 || balN > 1) {
        return n;
    }
    // we've already fixed the height at nR, do we need a rotation there?
    balR = hRR - hNRepl;
    if (balR < -1 || balR > 1) {
        return nR;
    }
    // try to fix the parent height while we've still got the lock
    return fixHeight_nl(nParent);
}
// Double rotation (conceptually: left-rotate at nL, then right-rotate at n,
// performed in one shot): nLR (= nL->right) is promoted to the root of this
// subtree, with nL as its left child and n as its right child.  The caller
// holds the locks on nParent, n, nL, and — presumably, mirroring the
// rotateLeftOverRight_nl call site — on nLR as well (TODO confirm at the
// caller).  hR, hLL, hLRL are the heights of n->right, nL->left, and
// nLR->left as observed by the caller.  Returns the node that still needs
// rebalancing (n or nLR), or fixHeight_nl(nParent).
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rotateRightOverLeft_nl(node_t<skey_t, sval_t>* nParent,
    node_t<skey_t, sval_t>* n,
    node_t<skey_t, sval_t>* nL,
    node_t<skey_t, sval_t>* nLR,
    int hR,
    int hLL,
    int hLRL) {
    int hNRepl;     // replacement height of n after the rotation
    int hLRepl;     // replacement height of nL after the rotation
    int balN;       // resulting balance factor at n
    int balLR;      // resulting balance factor at nLR
    version_t nodeOVL = n->changeOVL;
    version_t leftOVL = nL->changeOVL;
    version_t leftROVL = nLR->changeOVL;
    // remember which side of nParent currently points at n, before relinking
    node_t<skey_t, sval_t>* nPL = (node_t<skey_t, sval_t>*) nParent->left;
    node_t<skey_t, sval_t>* nLRL = (node_t<skey_t, sval_t>*) nLR->left;
    node_t<skey_t, sval_t>* nLRR = (node_t<skey_t, sval_t>*) nLR->right;
    int hLRR = height(nLRR);
    // both n and nL shrink; nLR grows to become the new subtree root
    n->changeOVL = beginShrink(nodeOVL);
    nL->changeOVL = beginShrink(leftOVL);
    nLR->changeOVL = beginGrow(leftROVL);
    lock_mb();
    // relink: nLR adopts nL and n as children; nLR's old children are
    // redistributed (nLRL goes under nL, nLRR goes under n)
    n->left = nLRR;
    nL->right = nLRL;
    nLR->left = nL;
    nLR->right = n;
    if (nPL == n) {
        nParent->left = nLR;
    } else {
        nParent->right = nLR;
    }
    nLR->parent = nParent;
    nL->parent = nLR;
    n->parent = nLR;
    if (nLRR != NULL) {
        nLRR->parent = n;
    }
    if (nLRL != NULL) {
        nLRL->parent = nL;
    }
    // fix up heights
    hNRepl = 1 + MAX(hLRR, hR);
    n->height = hNRepl;
    hLRepl = 1 + MAX(hLL, hLRL);
    nL->height = hLRepl;
    nLR->height = 1 + MAX(hLRepl, hNRepl);
    // publish the completed OVLs; in-flight optimistic readers will retry
    nLR->changeOVL = endGrow(leftROVL);
    nL->changeOVL = endShrink(leftOVL);
    n->changeOVL = endShrink(nodeOVL);
    lock_mb();
    // caller should have performed only a single rotation if nL was going
    // to end up damaged
    assert(ABS(hLL - hLRL) <= 1);
    // We have damaged nParent, nLR (now parent.child), and n (now
    // parent.child.right).  n is the deepest.  Perform as many fixes as we
    // can with the locks we've got.
    // We've already fixed the height for n, but it might still be outside
    // our allowable balance range.  In that case a simple fixHeight_nl
    // won't help.
    balN = hLRR - hR;
    if (balN < -1 || balN > 1) {
        // we need another rotation at n
        return n;
    }
    // we've already fixed the height at nLR, do we need a rotation here?
    balLR = hLRepl - hNRepl;
    if (balLR < -1 || balLR > 1) {
        return nLR;
    }
    // try to fix the parent height while we've still got the lock
    return fixHeight_nl(nParent);
}
// Mirror image of rotateRightOverLeft_nl: double rotation that promotes
// nRL (= nR->left) to the root of this subtree, with n as its left child
// and nR as its right child.  The caller holds the locks on nParent, n,
// nR, and nRL (the rebalanceToRight_nl call site visible above locks nRL
// and unlocks it after this returns).  hL, hRR, hRLR are the heights of
// n->left, nR->right, and nRL->right as observed by the caller.  Returns
// the node that still needs rebalancing (n or nRL), or
// fixHeight_nl(nParent).
template <typename skey_t, typename sval_t, class RecMgr>
node_t<skey_t, sval_t>* ccavl<skey_t, sval_t, RecMgr>::rotateLeftOverRight_nl(node_t<skey_t, sval_t>* nParent,
    node_t<skey_t, sval_t>* n,
    node_t<skey_t, sval_t>* nR,
    node_t<skey_t, sval_t>* nRL,
    int hL,
    int hRR,
    int hRLR) {
    int hNRepl;     // replacement height of n after the rotation
    int hRRepl;     // replacement height of nR after the rotation
    int balN;       // resulting balance factor at n
    int balRL;      // resulting balance factor at nRL
    version_t nodeOVL = n->changeOVL;
    version_t rightOVL = nR->changeOVL;
    version_t rightLOVL = nRL->changeOVL;
    // remember which side of nParent currently points at n, before relinking
    node_t<skey_t, sval_t>* nPL = (node_t<skey_t, sval_t>*) nParent->left;
    node_t<skey_t, sval_t>* nRLL = (node_t<skey_t, sval_t>*) nRL->left;
    int hRLL = height(nRLL);
    node_t<skey_t, sval_t>* nRLR = (node_t<skey_t, sval_t>*) nRL->right;
    // both n and nR shrink; nRL grows to become the new subtree root
    n->changeOVL = beginShrink(nodeOVL);
    nR->changeOVL = beginShrink(rightOVL);
    nRL->changeOVL = beginGrow(rightLOVL);
    lock_mb();
    // relink: nRL adopts n and nR as children; nRL's old children are
    // redistributed (nRLL goes under n, nRLR goes under nR)
    n->right = nRLL;
    nR->left = nRLR;
    nRL->right = nR;
    nRL->left = n;
    if (nPL == n) {
        nParent->left = nRL;
    } else {
        nParent->right = nRL;
    }
    nRL->parent = nParent;
    nR->parent = nRL;
    n->parent = nRL;
    if (nRLL != NULL) {
        nRLL->parent = n;
    }
    if (nRLR != NULL) {
        nRLR->parent = nR;
    }
    // fix up heights
    hNRepl = 1 + MAX(hL, hRLL);
    n->height = hNRepl;
    hRRepl = 1 + MAX(hRLR, hRR);
    nR->height = hRRepl;
    nRL->height = 1 + MAX(hNRepl, hRRepl);
    // publish the completed OVLs; in-flight optimistic readers will retry
    nRL->changeOVL = endGrow(rightLOVL);
    nR->changeOVL = endShrink(rightOVL);
    n->changeOVL = endShrink(nodeOVL);
    lock_mb();
    // caller should have performed only a single rotation if nR was going
    // to end up damaged
    assert(ABS(hRR - hRLR) <= 1);
    // we've fixed n's height, but n may still be outside the allowable
    // balance range, in which case another rotation at n is needed
    balN = hRLL - hL;
    if (balN < -1 || balN > 1) {
        return n;
    }
    // we've already fixed the height at nRL, do we need a rotation there?
    balRL = hRRepl - hNRepl;
    if (balRL < -1 || balRL > 1) {
        return nRL;
    }
    // try to fix the parent height while we've still got the lock
    return fixHeight_nl(nParent);
}
#endif
|
GB_emult_template.c | //------------------------------------------------------------------------------
// GB_emult_template: phase1 and phase2 for C=A.*B, C<M>=A.*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Computes C=A.*B (no mask) or C<M>=A.*B (mask present and not complemented).
// Does not handle the case C<!M>=A.*B. The complemented mask is handled in
// GB_mask instead. If present, the mask M is assumed to be very sparse
// compared with A and B.
// phase1: does not compute C itself, but just counts the # of entries in each
// vector of C. Fine tasks compute the # of entries in their slice of a
// single vector of C, and the results are cumsum'd.
// phase2: computes C, using the counts computed by phase1.
{
    // This template is #include'd with GB_PHASE_1_OF_2 or GB_PHASE_2_OF_2
    // defined by the includer, along with the GB_GETA/GB_GETB/GB_BINOP/
    // GB_ATYPE/GB_CTYPE macros that specialize it for one operator/type.

    // iB_first is unused if the operator is FIRST
    #include "GB_unused.h"

    //--------------------------------------------------------------------------
    // get A, B, M, and C
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const int64_t vlen = A->vlen ;
    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    const int64_t *restrict Bi = B->i ;
    const int64_t *restrict Mp = NULL ;
    const int64_t *restrict Mh = NULL ;
    const int64_t *restrict Mi = NULL ;
    const GB_void *restrict Mx = NULL ;
    GB_cast_function cast_M = NULL ;
    size_t msize = 0 ;
    if (M != NULL)
    {
        Mp = M->p ;
        Mh = M->h ;
        Mi = M->i ;
        Mx = M->x ;
        // typecast function to read M entries as boolean
        cast_M = GB_cast_factory (GB_BOOL_code, M->type->code) ;
        msize = M->type->size ;
    }
    #if defined ( GB_PHASE_2_OF_2 )
    // phase2 also needs the numeric values of A and B, and the output C
    // (whose pattern Cp/Ch was computed by phase1)
    const GB_ATYPE *restrict Ax = A->x ;
    const GB_ATYPE *restrict Bx = B->x ;
    const int64_t *restrict Cp = C->p ;
    const int64_t *restrict Ch = C->h ;
    int64_t *restrict Ci = C->i ;
    GB_CTYPE *restrict Cx = C->x ;
    #endif

    //--------------------------------------------------------------------------
    // phase1: count entries in each C(:,j); phase2: compute C
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast = TaskList [taskid].klast ;
        // fine tasks are flagged with klast == -1
        bool fine_task = (klast == -1) ;
        int64_t len ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            len = TaskList [taskid].len ;
        }
        else
        {
            // a coarse task operates on one or more whole vectors
            len = vlen ;
        }

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of C
            //------------------------------------------------------------------

            int64_t j = (Ch == NULL) ? k : Ch [k] ;

            #if defined ( GB_PHASE_1_OF_2 )
            int64_t cjnz = 0 ;
            #else
            int64_t pC, pC_end ;
            if (fine_task)
            {
                // A fine task computes a slice of C(:,j)
                pC = TaskList [taskid ].pC ;
                pC_end = TaskList [taskid+1].pC ;
                ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
            }
            else
            {
                // The vectors of C are never sliced for a coarse task.
                pC = Cp [k] ;
                pC_end = Cp [k+1] ;
            }
            int64_t cjnz = pC_end - pC ;
            // phase1 already determined C(:,j) is empty for this slice
            if (cjnz == 0) continue ;
            #endif

            //------------------------------------------------------------------
            // get A(:,j)
            //------------------------------------------------------------------

            int64_t pA = -1, pA_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Ai,Ax [pA...pA_end-1], which is
                // a subset of the vector A(:,j)
                pA = TaskList [taskid].pA ;
                pA_end = TaskList [taskid].pA_end ;
            }
            else
            {
                // A coarse task operates on the entire vector A (:,j)
                int64_t kA = (Ch == Ah) ? k :
                    ((C_to_A == NULL) ? j : C_to_A [k]) ;
                if (kA >= 0)
                {
                    pA = Ap [kA] ;
                    pA_end = Ap [kA+1] ;
                }
            }

            int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
            bool adense = (ajnz == len) ;
            // get the first and last indices in A(:,j) for this vector
            int64_t iA_first = -1 ;
            if (ajnz > 0)
            {
                iA_first = Ai [pA] ;
            }
            #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
            int64_t iA_last = -1 ;
            if (ajnz > 0)
            {
                iA_last = Ai [pA_end-1] ;
            }
            #endif

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            int64_t pB = -1, pB_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Bi,Bx [pB...pB_end-1], which is
                // a subset of the vector B(:,j)
                pB = TaskList [taskid].pB ;
                pB_end = TaskList [taskid].pB_end ;
            }
            else
            {
                // A coarse task operates on the entire vector B (:,j)
                int64_t kB = (Ch == Bh) ? k :
                    ((C_to_B == NULL) ? j : C_to_B [k]) ;
                if (kB >= 0)
                {
                    pB = Bp [kB] ;
                    pB_end = Bp [kB+1] ;
                }
            }

            int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
            bool bdense = (bjnz == len) ;
            // get the first and last indices in B(:,j) for this vector
            int64_t iB_first = -1 ;
            if (bjnz > 0)
            {
                iB_first = Bi [pB] ;
            }
            #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
            int64_t iB_last = -1 ;
            if (bjnz > 0)
            {
                iB_last = Bi [pB_end-1] ;
            }
            #endif

            //------------------------------------------------------------------
            // phase1: count nnz (C (:,j)); phase2: compute C(:,j)
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (ajnz == 0 || bjnz == 0)
            {

                //--------------------------------------------------------------
                // A(:,j) and/or B(:,j) are empty
                //--------------------------------------------------------------

                ;

            }
            else if (iA_last < iB_first || iB_last < iA_first)
            {

                //--------------------------------------------------------------
                // intersection of A(:,j) and B(:,j) is empty
                //--------------------------------------------------------------

                // the last entry of A(:,j) comes before the first entry
                // of B(:,j), or visa versa
                ;

            }
            else
            #endif
            if (M == NULL)
            {
                if (adense && bdense)
                {

                    //----------------------------------------------------------
                    // A(:,j) and B(:,j) dense: thus C(:,j) dense
                    //----------------------------------------------------------

                    ASSERT (ajnz == bjnz) ;
                    ASSERT (iA_first == iB_first) ;
                    ASSERT (iA_last == iB_last ) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        Ci [pC + p] = p + iA_first ;
                        GB_GETA (aij, Ax, pA + p) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij) ;
                    }
                    #endif

                }
                else if (adense)
                {

                    //----------------------------------------------------------
                    // A(:,j) is dense, B(:,j) is sparse: thus C(:,j) sparse
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = bjnz ;
                    #else
                    ASSERT (cjnz == bjnz) ;
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        int64_t i = Bi [pB + p] ;
                        Ci [pC + p] = i ;
                        // A(:,j) is dense: A(i,j) is at offset i - iA_first
                        GB_GETA (aij, Ax, pA + i - iA_first) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij) ;
                    }
                    #endif

                }
                else if (bdense)
                {

                    //----------------------------------------------------------
                    // A(:,j) is sparse, B(:,j) is dense: thus C(:,j) sparse
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        int64_t i = Ai [pA + p] ;
                        Ci [pC + p] = i ;
                        GB_GETA (aij, Ax, pA + p) ;
                        // B(:,j) is dense: B(i,j) is at offset i - iB_first
                        GB_GETB (bij, Bx, pB + i - iB_first) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij) ;
                    }
                    #endif

                }
                else if (ajnz > 32 * bjnz)
                {

                    //----------------------------------------------------------
                    // A(:,j) is much denser than B(:,j)
                    //----------------------------------------------------------

                    // iterate over the sparser B(:,j), binary-searching A(:,j)
                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        // find i in A(:,j)
                        int64_t pright = pA_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
                        if (found)
                        {
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else if (bjnz > 32 * ajnz)
                {

                    //----------------------------------------------------------
                    // B(:,j) is much denser than A(:,j)
                    //----------------------------------------------------------

                    // iterate over the sparser A(:,j), binary-searching B(:,j)
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        // find i in B(:,j)
                        int64_t pright = pB_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
                        if (found)
                        {
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else
                {

                    //----------------------------------------------------------
                    // A(:,j) and B(:,j) have about the same # of entries
                    //----------------------------------------------------------

                    // linear-time scan of A(:,j) and B(:,j)

                    while (pA < pA_end && pB < pB_end)
                    {
                        int64_t iA = Ai [pA] ;
                        int64_t iB = Bi [pB] ;
                        if (iA < iB)
                        {
                            // A(i,j) exists but not B(i,j)
                            pA++ ;
                        }
                        else if (iB < iA)
                        {
                            // B(i,j) exists but not A(i,j)
                            pB++ ;
                        }
                        else
                        {
                            // both A(i,j) and B(i,j) exist
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = iB ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij) ;
                            pC++ ;
                            #endif
                            pA++ ;
                            pB++ ;
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                }

            }
            else
            {

                //--------------------------------------------------------------
                // Mask is present
                //--------------------------------------------------------------

                int64_t pM = -1 ;
                int64_t pM_end = -1 ;
                if (fine_task)
                {
                    // A fine task operates on Mi,Mx [pM...pM_end-1], which is
                    // a subset of the vector M(:,j)
                    pM = TaskList [taskid].pM ;
                    pM_end = TaskList [taskid].pM_end ;
                }
                else
                {
                    int64_t kM = -1 ;
                    if (Ch == Mh)
                    {
                        // Ch is the same as Mh (a shallow copy), or both NULL
                        kM = k ;
                    }
                    else
                    {
                        kM = (C_to_M == NULL) ? j : C_to_M [k] ;
                    }
                    if (kM >= 0)
                    {
                        pM = Mp [kM] ;
                        pM_end = Mp [kM+1] ;
                    }
                }

                //--------------------------------------------------------------
                // C(:,j)<M(:,j) = A(:,j) .* B (:,j)
                //--------------------------------------------------------------

                // M is assumed very sparse: iterate over M(:,j) and
                // binary-search A(:,j) and B(:,j) for each M entry
                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // get M(i,j) for A(i,j) .* B (i,j)
                    //----------------------------------------------------------

                    int64_t i = Mi [pM] ;
                    bool mij ;
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                    if (!mij) continue ;

                    //----------------------------------------------------------
                    // get A(i,j)
                    //----------------------------------------------------------

                    int64_t apright = pA_end - 1 ;
                    bool afound ;
                    GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
                    if (!afound) continue ;

                    //----------------------------------------------------------
                    // get B(i,j)
                    //----------------------------------------------------------

                    int64_t bpright = pB_end - 1 ;
                    bool bfound ;
                    GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
                    if (!bfound) continue ;

                    //----------------------------------------------------------
                    // C(i,j) = A(i,j) .* B(i,j)
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz++ ;
                    #else
                    Ci [pC] = i ;
                    GB_GETA (aij, Ax, pA) ;
                    GB_GETB (bij, Bx, pB) ;
                    GB_BINOP (GB_CX (pC), aij, bij) ;
                    pC++ ;
                    #endif
                }

                #if defined ( GB_PHASE_2_OF_2 )
                ASSERT (pC == pC_end) ;
                #endif
            }

            //------------------------------------------------------------------
            // final count of nnz (C (:,j))
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (fine_task)
            {
                // fine task: result is cumsum'd across tasks later
                TaskList [taskid].pC = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
            #endif
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.