yescrypt-simd_c.h
/*-
 * Copyright 2009 Colin Percival
 * Copyright 2012-2014 Alexander Peslyak
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This file was originally written by Colin Percival as part of the Tarsnap
 * online backup system.
 */

/*
 * On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
 * gcc bug 54349 (fixed for gcc 4.9+).  On 32-bit, it's of direct help.  AVX
 * and XOP are of further help either way.
 */
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif

#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "sha256.h"
#include "sysendian.h"

#include "yescrypt.h"

#include "yescrypt-platform_c.h"

#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif

#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */

#ifdef __XOP__
#define ARX(out, in1, in2, s) \
    out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
    { \
        __m128i T = _mm_add_epi32(in1, in2); \
        out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
        out = _mm_xor_si128(out, _mm_srli_epi32(T, 32 - s)); \
    }
#endif

#define SALSA20_2ROUNDS \
    /* Operate on "columns" */ \
    ARX(X1, X0, X3, 7) \
    ARX(X2, X1, X0, 9) \
    ARX(X3, X2, X1, 13) \
    ARX(X0, X3, X2, 18) \
\
    /* Rearrange data */ \
    X1 = _mm_shuffle_epi32(X1, 0x93); \
    X2 = _mm_shuffle_epi32(X2, 0x4E); \
    X3 = _mm_shuffle_epi32(X3, 0x39); \
\
    /* Operate on "rows" */ \
    ARX(X3, X0, X1, 7) \
    ARX(X2, X3, X0, 9) \
    ARX(X1, X2, X3, 13) \
    ARX(X0, X1, X2, 18) \
\
    /* Rearrange data */ \
    X1 = _mm_shuffle_epi32(X1, 0x39); \
    X2 = _mm_shuffle_epi32(X2, 0x4E); \
    X3 = _mm_shuffle_epi32(X3, 0x93);

/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3).
 */
#define SALSA20_8_BASE(maybe_decl, out) \
    { \
        maybe_decl Y0 = X0; \
        maybe_decl Y1 = X1; \
        maybe_decl Y2 = X2; \
        maybe_decl Y3 = X3; \
        SALSA20_2ROUNDS \
        SALSA20_2ROUNDS \
        SALSA20_2ROUNDS \
        SALSA20_2ROUNDS \
        (out)[0] = X0 = _mm_add_epi32(X0, Y0); \
        (out)[1] = X1 = _mm_add_epi32(X1, Y1); \
        (out)[2] = X2 = _mm_add_epi32(X2, Y2); \
        (out)[3] = X3 = _mm_add_epi32(X3, Y3); \
    }
#define SALSA20_8(out) \
    SALSA20_8_BASE(__m128i, out)

/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
 */
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
    X0 = _mm_xor_si128(X0, Z0); \
    X1 = _mm_xor_si128(X1, Z1); \
    X2 = _mm_xor_si128(X2, Z2); \
    X3 = _mm_xor_si128(X3, Z3); \
    SALSA20_8_BASE(maybe_decl, out)

#define SALSA20_8_XOR_MEM(in, out) \
    SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)

#define SALSA20_8_XOR_REG(out) \
    SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)

typedef union {
    uint32_t w[16];
    __m128i q[4];
} salsa20_blk_t;

/**
 * blockmix_salsa8(Bin, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 */
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
    salsa20_blk_t *restrict Bout, size_t r)
{
    __m128i X0, X1, X2, X3;
    size_t i;

    r--;
    PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin[i * 2], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
    }
    PREFETCH(&Bin[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    X0 = Bin[r * 2 + 1].q[0];
    X1 = Bin[r * 2 + 1].q[1];
    X2 = Bin[r * 2 + 1].q[2];
    X3 = Bin[r * 2 + 1].q[3];

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}

/*
 * (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
 * starting with Sandy Bridge.  Additionally, PSHUFD uses separate source and
 * destination registers, whereas the shifts would require an extra move
 * instruction for our code when building without AVX.  Unfortunately, PSHUFD
 * is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
 * and somewhat slower on some non-Intel CPUs (luckily not including AMD
 * Bulldozer and Piledriver).  Since for many other CPUs using (V)PSHUFD is a
 * win in terms of throughput and/or not needing a move instruction, we
 * currently use it despite the higher latency on some older CPUs.  As an
 * alternative, the #if below may be patched to only enable use of (V)PSHUFD
 * when building with SSE4.1 or newer, which is not available on older CPUs
 * where this instruction has higher latency.
 */
#if 1
#define HI32(X) \
    _mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
    _mm_srli_si128((X), 4)
#else
#define HI32(X) \
    _mm_srli_epi64((X), 32)
#endif

#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
 * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
    ((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif

/* This is tunable */
#define S_BITS 8

/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4

/* Number of S-boxes.  Not tunable by design, hard-coded in a few places. */
#define S_N 2

/* Derived values.  Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)

#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
    x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
    s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
    s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
    X = _mm_mul_epu32(HI32(X), X); \
    X = _mm_add_epi64(X, s0); \
    X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
    x = EXTRACT64(X) & S_MASK2; \
    s0 = *(const __m128i *)(S0 + (uint32_t)x); \
    s1 = *(const __m128i *)(S1 + (x >> 32)); \
    X = _mm_mul_epu32(HI32(X), X); \
    X = _mm_add_epi64(X, s0); \
    X = _mm_xor_si128(X, s1);
#endif

#define PWXFORM_ROUND \
    PWXFORM_SIMD(X0, x0, s00, s01) \
    PWXFORM_SIMD(X1, x1, s10, s11) \
    PWXFORM_SIMD(X2, x2, s20, s21) \
    PWXFORM_SIMD(X3, x3, s30, s31)

#define PWXFORM \
    { \
        PWXFORM_X_T x0, x1, x2, x3; \
        __m128i s00, s01, s10, s11, s20, s21, s30, s31; \
        PWXFORM_ROUND PWXFORM_ROUND \
        PWXFORM_ROUND PWXFORM_ROUND \
        PWXFORM_ROUND PWXFORM_ROUND \
    }

#define XOR4(in) \
    X0 = _mm_xor_si128(X0, (in)[0]); \
    X1 = _mm_xor_si128(X1, (in)[1]); \
    X2 = _mm_xor_si128(X2, (in)[2]); \
    X3 = _mm_xor_si128(X3, (in)[3]);

#define OUT(out) \
    (out)[0] = X0; \
    (out)[1] = X1; \
    (out)[2] = X2; \
    (out)[3] = X3;

/**
 * blockmix_pwxform(Bin, Bout, r, S):
 * Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin).  The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3;
    size_t i;

    if (!S) {
        blockmix_salsa8(Bin, Bout, r);
        return;
    }

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;

    r--;
    PREFETCH(&Bin[r], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

    /* X <-- B_{r1 - 1} */
    X0 = Bin[r].q[0];
    X1 = Bin[r].q[1];
    X2 = Bin[r].q[2];
    X3 = Bin[r].q[3];

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        /* X <-- H'(X \xor B_i) */
        XOR4(Bin[i].q)
        PWXFORM
        /* B'_i <-- X */
        OUT(Bout[i].q)
    }

    /* Last iteration of the loop above */
    XOR4(Bin[i].q)
    PWXFORM

    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)
}

#define XOR4_2(in1, in2) \
    X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
    X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
    X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
    X3 = _mm_xor_si128((in1)[3], (in2)[3]);

static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM)
{
    __m128i X0, X1, X2, X3;
    size_t i;

    r--;
    if (Bin2_in_ROM) {
        PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
        PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
            PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
            PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
        }
        PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    } else {
        PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
        PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
            PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
        }
        PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    }
    PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[0].q)
    SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2 + 1].q)
        SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2].q)
        SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[r * 2 + 1].q)
    SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

    return _mm_cvtsi128_si32(X0);
}

static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3;
    size_t i;

    if (!S)
        return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;

    r--;
    if (Bin2_in_ROM) {
        PREFETCH(&Bin2[r], _MM_HINT_NTA)
        PREFETCH(&Bin1[r], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i], _MM_HINT_NTA)
            PREFETCH(&Bin1[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        }
    } else {
        PREFETCH(&Bin2[r], _MM_HINT_T0)
        PREFETCH(&Bin1[r], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i], _MM_HINT_T0)
            PREFETCH(&Bin1[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        }
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

    /* X <-- B_{r1 - 1} */
    XOR4_2(Bin1[r].q, Bin2[r].q)

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        /* X <-- H'(X \xor B_i) */
        XOR4(Bin1[i].q)
        XOR4(Bin2[i].q)
        PWXFORM
        /* B'_i <-- X */
        OUT(Bout[i].q)
    }

    /* Last iteration of the loop above */
    XOR4(Bin1[i].q)
    XOR4(Bin2[i].q)
    PWXFORM

    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)

    return _mm_cvtsi128_si32(X0);
}

#undef XOR4
#define XOR4(in, out) \
    (out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
    (out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
    (out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
    (out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);

static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
    __m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
    size_t i;

    r--;
    PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
    PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
        PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
        PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
        PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
    }
    PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[0].q, Bin2[0].q)
    SALSA20_8_XOR_REG(Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
        SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
        SALSA20_8_XOR_REG(Bout[i].q)
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
    SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

    return _mm_cvtsi128_si32(X0);
}

#define XOR4_Y \
    X0 = _mm_xor_si128(X0, Y0); \
    X1 = _mm_xor_si128(X1, Y1); \
    X2 = _mm_xor_si128(X2, Y2); \
    X3 = _mm_xor_si128(X3, Y3);

static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
    size_t i;

    if (!S)
        return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;

    r--;
    PREFETCH(&Bin2[r], _MM_HINT_T0)
    PREFETCH(&Bin1[r], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin2[i], _MM_HINT_T0)
        PREFETCH(&Bin1[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

    /* X <-- B_{r1 - 1} */
    XOR4_2(Bin1[r].q, Bin2[r].q)

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        XOR4(Bin1[i].q, Bin2[i].q)
        /* X <-- H'(X \xor B_i) */
        XOR4_Y
        PWXFORM
        /* B'_i <-- X */
        OUT(Bout[i].q)
    }

    /* Last iteration of the loop above */
    XOR4(Bin1[i].q, Bin2[i].q)
    XOR4_Y
    PWXFORM

    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)

    return _mm_cvtsi128_si32(X0);
}

#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_X_T
#undef PWXFORM_SIMD
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
    return B[2 * r - 1].w[0];
}

/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 128r bytes in length.  The value N must be even and no
 * smaller than 2.  The array V must be aligned to a multiple of 64 bytes, and
 * arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
 * bytes as well saves cache lines, but might result in cache bank conflicts).
 */
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
    const salsa20_blk_t * VROM = shared->shared1.aligned;
    uint32_t VROM_mask = shared->mask1;
    size_t s = 2 * r;
    salsa20_blk_t * X = V, * Y;
    uint32_t i, j;
    size_t k;

    /* 1: X <-- B */
    /* 3: V_i <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
        }
    }

    if (NROM && (VROM_mask & 1)) {
        uint32_t n;
        salsa20_blk_t * V_n;
        const salsa20_blk_t * V_j;

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[s];
        blockmix(X, Y, r, S);

        X = &V[2 * s];
        if ((1 & VROM_mask) == 1) {
            /* j <-- Integerify(X) mod NROM */
            j = integerify(Y, r) & (NROM - 1);
            V_j = &VROM[j * s];

            /* X <-- H(X \xor VROM_j) */
            j = blockmix_xor(Y, V_j, X, r, 1, S);
        } else {
            /* X <-- H(X) */
            blockmix(Y, X, r, S);
            j = integerify(X, r);
        }

        for (n = 2; n < N; n <<= 1) {
            uint32_t m = (n < N / 2) ? n : (N - 1 - n);

            V_n = &V[n * s];

            /* 2: for i = 0 to N - 1 do */
            for (i = 1; i < m; i += 2) {
                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i - 1;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                Y = &V_n[i * s];
                j = blockmix_xor(X, V_j, Y, r, 0, S);

                if (((n + i) & VROM_mask) == 1) {
                    /* j <-- Integerify(X) mod NROM */
                    j &= NROM - 1;
                    V_j = &VROM[j * s];
                } else {
                    /* j <-- Wrap(Integerify(X), i) */
                    j &= n - 1;
                    j += i;
                    V_j = &V[j * s];
                }

                /* X <-- H(X \xor VROM_j) */
                X = &V_n[(i + 1) * s];
                j = blockmix_xor(Y, V_j, X, r, 1, S);
            }
        }

        n >>= 1;

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 2 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[(N - 1) * s];
        j = blockmix_xor(X, V_j, Y, r, 0, S);

        if (((N - 1) & VROM_mask) == 1) {
            /* j <-- Integerify(X) mod NROM */
            j &= NROM - 1;
            V_j = &VROM[j * s];
        } else {
            /* j <-- Wrap(Integerify(X), i) */
            j &= n - 1;
            j += N - 1 - n;
            V_j = &V[j * s];
        }

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        X = XY;
        blockmix_xor(Y, V_j, X, r, 1, S);
    } else if (flags & YESCRYPT_RW) {
        uint32_t n;
        salsa20_blk_t * V_n, * V_j;

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[s];
        blockmix(X, Y, r, S);

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        X = &V[2 * s];
        blockmix(Y, X, r, S);
        j = integerify(X, r);

        for (n = 2; n < N; n <<= 1) {
            uint32_t m = (n < N / 2) ? n : (N - 1 - n);

            V_n = &V[n * s];

            /* 2: for i = 0 to N - 1 do */
            for (i = 1; i < m; i += 2) {
                Y = &V_n[i * s];

                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i - 1;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                j = blockmix_xor(X, V_j, Y, r, 0, S);

                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                X = &V_n[(i + 1) * s];
                j = blockmix_xor(Y, V_j, X, r, 0, S);
            }
        }

        n >>= 1;

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 2 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[(N - 1) * s];
        j = blockmix_xor(X, V_j, Y, r, 0, S);

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 1 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        X = XY;
        blockmix_xor(Y, V_j, X, r, 0, S);
    } else {
        /* 2: for i = 0 to N - 1 do */
        for (i = 1; i < N - 1; i += 2) {
            /* 4: X <-- H(X) */
            /* 3: V_i <-- X */
            Y = &V[i * s];
            blockmix(X, Y, r, S);

            /* 4: X <-- H(X) */
            /* 3: V_i <-- X */
            X = &V[(i + 1) * s];
            blockmix(Y, X, r, S);
        }

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[i * s];
        blockmix(X, Y, r, S);

        /* 4: X <-- H(X) */
        X = XY;
        blockmix(Y, X, r, S);
    }

    /* B' <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
        }
    }
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r bytes in length.  The value N must be a power of 2
 * greater than 1.  The value Nloop must be even.  The array V must be aligned
 * to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
 * bytes (aligning them to 64 bytes as well saves cache lines, but might result
 * in cache bank conflicts).
 */
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
    const salsa20_blk_t * VROM = shared->shared1.aligned;
    uint32_t VROM_mask = shared->mask1;
    size_t s = 2 * r;
    salsa20_blk_t * X = XY, * Y = &XY[s];
    uint64_t i;
    uint32_t j;
    size_t k;

    if (Nloop == 0)
        return;

    /* X <-- B' */
    /* 3: V_i <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
        }
    }

    i = Nloop / 2;

    /* 7: j <-- Integerify(X) mod N */
    j = integerify(X, r) & (N - 1);

    /*
     * Normally, NROM implies YESCRYPT_RW, but we check for these separately
     * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2()
     * calls operating on the entire V.
     */
    if (NROM && (flags & YESCRYPT_RW)) {
        /* 6: for i = 0 to N - 1 do */
        for (i = 0; i < Nloop; i += 2) {
            salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* j <-- Integerify(X) mod NROM */
            j = blockmix_xor_save(X, V_j, Y, r, S);

            if (((i + 1) & VROM_mask) == 1) {
                const salsa20_blk_t * VROM_j;

                j &= NROM - 1;
                VROM_j = &VROM[j * s];

                /* X <-- H(X \xor VROM_j) */
                /* 7: j <-- Integerify(X) mod N */
                j = blockmix_xor(Y, VROM_j, X, r, 1, S);
            } else {
                j &= N - 1;
                V_j = &V[j * s];

                /* 8: X <-- H(X \xor V_j) */
                /* V_j <-- Xprev \xor V_j */
                /* j <-- Integerify(X) mod NROM */
                j = blockmix_xor_save(Y, V_j, X, r, S);
            }
            j &= N - 1;
            V_j = &V[j * s];
        }
    } else if (NROM) {
        /* 6: for i = 0 to N - 1 do */
        for (i = 0; i < Nloop; i += 2) {
            const salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* j <-- Integerify(X) mod NROM */
            j = blockmix_xor(X, V_j, Y, r, 0, S);

            if (((i + 1) & VROM_mask) == 1) {
                j &= NROM - 1;
                V_j = &VROM[j * s];
            } else {
                j &= N - 1;
                V_j = &V[j * s];
            }

            /* X <-- H(X \xor VROM_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(Y, V_j, X, r, 1, S);
            j &= N - 1;
            V_j = &V[j * s];
        }
    } else if (flags & YESCRYPT_RW) {
        /* 6: for i = 0 to N - 1 do */
        do {
            salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor_save(X, V_j, Y, r, S);
            j &= N - 1;
            V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor_save(Y, V_j, X, r, S);
            j &= N - 1;
        } while (--i);
    } else {
        /* 6: for i = 0 to N - 1 do */
        do {
            const salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(X, V_j, Y, r, 0, S);
            j &= N - 1;
            V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(Y, V_j, X, r, 0, S);
            j &= N - 1;
        } while (--i);
    }

    /* 10: B' <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
        }
    }
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
 */
static uint64_t
p2floor(uint64_t x)
{
    uint64_t y;
    while ((y = x & (x - 1)))
        x = y;
    return x;
}

/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N).  The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage XY
 * must be 256r or 256rp bytes in length (the larger size is required with
 * OpenMP-enabled builds).  The value N must be a power of 2 greater than 1.
 * The array V must be aligned to a multiple of 64 bytes, and arrays B and
 * XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
 * saves cache lines and helps avoid false sharing in OpenMP-enabled builds
 * when p > 1, but it might also result in cache bank conflicts).
 */
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
    size_t s = 2 * r;
    uint32_t Nchunk = N / p;
    uint64_t Nloop_all, Nloop_rw;
    uint32_t i;

    Nloop_all = Nchunk;
    if (flags & YESCRYPT_RW) {
        if (t <= 1) {
            if (t)
                Nloop_all *= 2; /* 2/3 */
            Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
        } else {
            Nloop_all *= t - 1;
        }
    } else if (t) {
        if (t == 1)
            Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
        Nloop_all *= t;
    }

    Nloop_rw = 0;
    if (flags & __YESCRYPT_INIT_SHARED)
        Nloop_rw = Nloop_all;
    else if (flags & YESCRYPT_RW)
        Nloop_rw = Nloop_all / p;

    Nchunk &= ~(uint32_t)1; /* round down to even */
    Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
    Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
    {
#pragma omp for
#endif
    for (i = 0; i < p; i++) {
        uint32_t Vchunk = i * Nchunk;
        uint8_t * Bp = &B[128 * r * i];
        salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
        salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
        salsa20_blk_t * XYp = XY;
#endif
        uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
        void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
        if (Sp)
            smix1(Bp, 1, S_SIZE_ALL / 128,
                flags & ~YESCRYPT_PWXFORM,
                Sp, NROM, shared, XYp, NULL);
        if (!(flags & __YESCRYPT_INIT_SHARED_2))
            smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
        smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
            NROM, shared, XYp, Sp);
    }

    if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
        for (i = 0; i < p; i++) {
            uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
            salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
            salsa20_blk_t * XYp = XY;
#endif
            void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
            smix2(Bp, r, N, Nloop_all - Nloop_rw,
                flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
        }
    }
#ifdef _OPENMP
    }
#endif
}

/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.  The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N must be a power
 * of 2 greater than 1.  (This optimized implementation currently additionally
 * limits N to the range from 8 to 2^31, but other implementations might not.)
 *
 * t controls computation time while not affecting peak memory usage.  shared
 * and flags may request special modes as described in yescrypt.h.  local is
 * the thread-local data structure, allowing a memory allocation to be
 * preserved and reused across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error.
 */
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
    yescrypt_region_t tmp;
    uint64_t NROM;
    size_t B_size, V_size, XY_size, need;
    uint8_t * B, * S;
    salsa20_blk_t * V, * XY;
    uint8_t sha256[32];

    /*
     * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
     * so don't let it have side-effects.  Without this adjustment, it'd
     * enable the SHA-256 password pre-hashing and output post-hashing,
     * because any deviation from classic scrypt implies those.
     */
    if (p == 1)
        flags &= ~YESCRYPT_PARALLEL_SMIX;

    /* Sanity-check parameters */
    if (flags & ~YESCRYPT_KNOWN_FLAGS) {
        errno = EINVAL;
        return -1;
    }
#if SIZE_MAX > UINT32_MAX
    if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
        errno = EFBIG;
        return -1;
    }
#endif
    if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
        errno = EFBIG;
        return -1;
    }
    if (N > UINT32_MAX) {
        errno = EFBIG;
        return -1;
    }
    if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
        errno = EINVAL;
        return -1;
    }
    if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
        errno = EINVAL;
        return -1;
    }
    if ((r > SIZE_MAX / 256 / p) ||
        (N > SIZE_MAX / 128 / r)) {
        errno = ENOMEM;
        return -1;
    }
#ifdef _OPENMP
    if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
        (N > SIZE_MAX / 128 / (r * p))) {
        errno = ENOMEM;
        return -1;
    }
#endif
    if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
        (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
        p > SIZE_MAX / S_SIZE_ALL) {
        errno = ENOMEM;
        return -1;
    }

    NROM = 0;
    if (shared->shared1.aligned) {
        NROM = shared->shared1.aligned_size / ((size_t)128 * r);
        if (NROM > UINT32_MAX) {
            errno = EFBIG;
            return -1;
        }
        if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
            !(flags & YESCRYPT_RW)) {
            errno = EINVAL;
            return -1;
        }
    }

    /* Allocate memory */
    V = NULL;
    V_size = (size_t)128 * r * N;
#ifdef _OPENMP
    if (!(flags & YESCRYPT_PARALLEL_SMIX))
        V_size *= p;
#endif
    need = V_size;
    if (flags & __YESCRYPT_INIT_SHARED) {
        if (local->aligned_size < need) {
            if (local->base || local->aligned ||
                local->base_size || local->aligned_size) {
                errno = EINVAL;
                return -1;
            }
            if (!alloc_region(local, need))
                return -1;
        }
        V = (salsa20_blk_t *)local->aligned;
        need = 0;
    }
    B_size = (size_t)128 * r * p;
    need += B_size;
    if (need < B_size) {
        errno = ENOMEM;
        return -1;
    }
    XY_size = (size_t)256 * r;
#ifdef _OPENMP
    XY_size *= p;
#endif
    need += XY_size;
    if (need < XY_size) {
        errno = ENOMEM;
        return -1;
    }
    if (flags & YESCRYPT_PWXFORM) {
        size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
        S_size *= p;
#else
        if (flags & YESCRYPT_PARALLEL_SMIX)
            S_size *= p;
#endif
        need += S_size;
        if (need < S_size) {
            errno = ENOMEM;
            return -1;
        }
    }
    if (flags & __YESCRYPT_INIT_SHARED) {
        if (!alloc_region(&tmp, need))
            return -1;
        B = (uint8_t *)tmp.aligned;
        XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
    } else {
        init_region(&tmp);
        if (local->aligned_size < need) {
            if (free_region(local))
                return -1;
            if (!alloc_region(local, need))
                return -1;
        }
        B = (uint8_t *)local->aligned;
        V = (salsa20_blk_t *)((uint8_t *)B + B_size);
        XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
    }
    S = NULL;
    if (flags & YESCRYPT_PWXFORM)
        S = (uint8_t *)XY + XY_size;

    if (t || flags) {
        SHA256_CTX ctx;
        SHA256_Init(&ctx);
        SHA256_Update(&ctx, passwd, passwdlen);
        SHA256_Final(sha256, &ctx);
        passwd = sha256;
        passwdlen = sizeof(sha256);
    }

    /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
    PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

    if (t || flags)
        memcpy(sha256, B, sizeof(sha256));

    if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
        smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
    } else {
        uint32_t i;

        /* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
        for (i = 0; i < p; i++) {
            /* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
            smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
                &V[(size_t)2 * r * i * N],
                NROM, shared,
                &XY[(size_t)4 * r * i],
                S ? &S[S_SIZE_ALL * i] : S);
#else
            smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
                NROM, shared, XY, S);
#endif
        }
    }

    /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
    PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

    /*
     * Except when computing classic scrypt, allow all computation so far
     * to be performed on the client.  The final steps below match those of
     * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
     * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
     * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
     */
    if ((t || flags) && buflen == sizeof(sha256)) {
        /* Compute ClientKey */
        {
            HMAC_SHA256_CTX ctx;
            HMAC_SHA256_Init(&ctx, buf, buflen);
            HMAC_SHA256_Update(&ctx, "JagaricoinR", 10);
            HMAC_SHA256_Final(sha256, &ctx);
        }
        /* Compute StoredKey */
        {
            SHA256_CTX ctx;
            SHA256_Init(&ctx);
            SHA256_Update(&ctx, sha256, sizeof(sha256));
            SHA256_Final(buf, &ctx);
        }
    }

    if (free_region(&tmp))
        return -1;

    /* Success! */
    return 0;
}
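/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * p2floor() above returns the largest power of 2 not greater than its
 * argument by repeatedly clearing the lowest set bit (x & (x - 1)) until a
 * single bit remains.  A standalone copy with a few sanity checks:
 */
#include <assert.h>
#include <stdint.h>

static uint64_t p2floor_demo(uint64_t x)
{
    uint64_t y;
    while ((y = x & (x - 1)))   /* clear the lowest set bit */
        x = y;
    return x;
}

static void p2floor_selftest(void)
{
    assert(p2floor_demo(1) == 1);
    assert(p2floor_demo(7) == 4);
    assert(p2floor_demo(8) == 8);
    assert(p2floor_demo(1000) == 512);
}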
bml_multiply_ellblock_typed.c
#include "../../internal-blas/bml_gemm.h" #include "../../macros.h" #include "../../typed.h" #include "../bml_add.h" #include "../bml_allocate.h" #include "../bml_logger.h" #include "../bml_multiply.h" #include "../bml_multiply.h" #include "../bml_parallel.h" #include "../bml_types.h" #include "bml_add_ellblock.h" #include "bml_allocate_ellblock.h" #include "bml_multiply_ellblock.h" #include "bml_types_ellblock.h" #include "bml_utilities_ellblock.h" #include <complex.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #ifdef _OPENMP #include <omp.h> #endif void TYPED_FUNC( bml_multiply_block1) ( REAL_T * mat0, REAL_T * mat1, const int ld, REAL_T * x, const int bsizeib, const int bsizekb) { REAL_T *xk = x; for (int ii = 0; ii < bsizeib; ii++) { REAL_T *a = mat0 + ld * ii; REAL_T *b = mat1; for (int jj = 0; jj < bsizekb; jj++) { (*xk) += a[0] * b[0]; xk++; b += ld; } } } void TYPED_FUNC( bml_multiply_block2) ( REAL_T * mat0, REAL_T * mat1, const int ld, REAL_T * x, const int bsizeib, const int bsizekb) { REAL_T *xk = x; for (int ii = 0; ii < bsizeib; ii++) { REAL_T *a = mat0 + ld * ii; REAL_T *b = mat1; for (int jj = 0; jj < bsizekb; jj++) { (*xk) += a[0] * b[0] + a[1] * b[1]; xk++; b += ld; } } } void TYPED_FUNC( bml_multiply_block3) ( REAL_T * mat0, REAL_T * mat1, const int ld, REAL_T * x, const int bsizeib, const int bsizekb) { REAL_T *xk = x; for (int ii = 0; ii < bsizeib; ii++) { REAL_T *a = mat0 + ld * ii; REAL_T *b = mat1; for (int jj = 0; jj < bsizekb; jj++) { (*xk) += a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; xk++; b += ld; } } } void TYPED_FUNC( bml_multiply_block4) ( REAL_T * mat0, REAL_T * mat1, const int ld, REAL_T * x, const int bsizeib, const int bsizekb) { REAL_T *xk = x; for (int ii = 0; ii < bsizeib; ii++) { REAL_T *a = mat0 + ld * ii; REAL_T *b = mat1; for (int jj = 0; jj < bsizekb; jj++) { (*xk) += a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]; xk++; b += ld; } } } void TYPED_FUNC( bml_multiply_block5) ( REAL_T * mat0, REAL_T * mat1, const int ld, REAL_T * x, const int bsizeib, const int bsizekb) { REAL_T *xk = x; for (int ii = 0; ii < bsizeib; ii++) { REAL_T *a = mat0 + ld * ii; REAL_T *b = mat1; for (int jj = 0; jj < bsizekb; jj++) { (*xk) += a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3] + a[4] * b[4]; xk++; b += ld; } } } void TYPED_FUNC( bml_multiply_block6) ( REAL_T * mat0, REAL_T * mat1, const int ld, REAL_T * x, const int bsizeib, const int bsizekb) { REAL_T *xk = x; for (int ii = 0; ii < bsizeib; ii++) { REAL_T *a = mat0 + ld * ii; REAL_T *b = mat1; for (int jj = 0; jj < bsizekb; jj++) { (*xk) += a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3] + a[4] * b[4] + a[5] * b[5]; xk++; b += ld; } } } /** Matrix multiply. 
* * \f$ C \leftarrow \alpha A \, B + \beta C \f$ * * \ingroup multiply_group * * \param A Matrix A * \param B Matrix B * \param C Matrix C * \param alpha Scalar factor multiplied by A * B * \param beta Scalar factor multiplied by C * \param threshold Used for sparse multiply */ void TYPED_FUNC( bml_multiply_ellblock) ( bml_matrix_ellblock_t * A, bml_matrix_ellblock_t * B, bml_matrix_ellblock_t * C, double alpha, double beta, double threshold) { double ONE = 1.0; double ZERO = 0.0; if (A == NULL || B == NULL) { LOG_ERROR("Either matrix A or B are NULL\n"); } if (A == B && alpha == ONE && beta == ZERO) { TYPED_FUNC(bml_multiply_x2_ellblock) (A, C, threshold); } else { bml_matrix_ellblock_t *A2 = TYPED_FUNC(bml_block_matrix_ellblock) (C->NB, C->MB, C->M, C->bsize, C->distribution_mode); if (A != NULL && A == B) { TYPED_FUNC(bml_multiply_x2_ellblock) (A, A2, threshold); } else { TYPED_FUNC(bml_multiply_AB_ellblock) (A, B, A2, threshold); } #ifdef DO_MPI if (bml_getNRanks() > 1 && A2->distribution_mode == distributed) { bml_allGatherVParallel(A2); } #endif TYPED_FUNC(bml_add_ellblock) (C, A2, beta, alpha, threshold); bml_deallocate_ellblock(A2); } } /** Matrix multiply. * * \f$ X^{2} \leftarrow X \, X \f$ * * \ingroup multiply_group * * \param X Matrix X * \param X2 Matrix X2 * \param threshold Used for sparse multiply */ void *TYPED_FUNC( bml_multiply_x2_ellblock) ( bml_matrix_ellblock_t * X, bml_matrix_ellblock_t * X2, double threshold) { int NB = X->NB; int MB = X->MB; int *X_indexb = X->indexb; int *X_nnzb = X->nnzb; int *bsize = X->bsize; int *X2_indexb = X2->indexb; int *X2_nnzb = X2->nnzb; int ix[NB], jx[NB]; REAL_T *x_ptr[NB]; REAL_T traceX = 0.0; REAL_T traceX2 = 0.0; REAL_T **X_ptr_value = (REAL_T **) X->ptr_value; REAL_T **X2_ptr_value = (REAL_T **) X2->ptr_value; double *trace = bml_allocate_memory(sizeof(double) * 2); memset(ix, 0, NB * sizeof(int)); memset(jx, 0, NB * sizeof(int)); int maxbsize = 0; for (int ib = 0; ib < NB; ib++) maxbsize = MAX(maxbsize, bsize[ib]); int maxbsize2 = maxbsize * maxbsize; #ifdef _OPENMP const int nthreads = omp_get_max_threads(); #else const int nthreads = 1; #endif REAL_T *x_ptr_storage = bml_allocate_memory(maxbsize2 * NB * nthreads * sizeof(REAL_T)); char xptrset = 0; void ( *fun_ptr_arr[]) ( REAL_T *, REAL_T *, const int, REAL_T *, const int, const int) = { TYPED_FUNC(bml_multiply_block1), TYPED_FUNC(bml_multiply_block2), TYPED_FUNC(bml_multiply_block3), TYPED_FUNC(bml_multiply_block4), TYPED_FUNC(bml_multiply_block5), TYPED_FUNC(bml_multiply_block6)}; #pragma omp parallel for \ firstprivate(ix,jx, x_ptr, xptrset) \ reduction(+: traceX, traceX2) //loop over row blocks for (int ib = 0; ib < NB; ib++) { int lb = 0; if (!xptrset) { #ifdef _OPENMP int offset = omp_get_thread_num() * maxbsize2 * NB; #else int offset = 0; #endif for (int i = 0; i < NB; i++) x_ptr[i] = &x_ptr_storage[offset + i * maxbsize2]; xptrset = 1; } REAL_T *T_value_right = bml_noinit_allocate_memory(maxbsize2 * sizeof(REAL_T)); // loop over non-zero blocks in row block ib for (int jp = 0; jp < X_nnzb[ib]; jp++) { int ind = ROWMAJOR(ib, jp, NB, MB); REAL_T *X_value_left = X_ptr_value[ind]; int jb = X_indexb[ind]; const int bsizejb = bsize[jb]; if (jb == ib) { for (int kk = 0; kk < bsize[ib]; kk++) traceX += X_value_left[kk * (1 + bsize[ib])]; } for (int kp = 0; kp < X_nnzb[jb]; kp++) { int indk = ROWMAJOR(jb, kp, NB, MB); int kb = X_indexb[indk]; if (ix[kb] == 0) { memset(x_ptr[kb], 0.0, maxbsize2 * sizeof(REAL_T)); jx[lb] = kb; ix[kb] = ib + 1; lb++; } REAL_T *x = x_ptr[kb]; 
REAL_T *X_value_right = X_ptr_value[indk]; // multiply block ib,jb by block jb,kb #ifndef BML_USE_XSMM const int bsizekb = bsize[kb]; // transpose storage for matrix on the right for (int ii = 0; ii < bsizejb; ii++) { const int offset = ii * bsizekb; for (int jj = 0; jj < bsizekb; jj++) T_value_right[jj * bsizejb + ii] = X_value_right[offset + jj]; } (*fun_ptr_arr[bsizejb - 1]) (X_value_left, T_value_right, bsizejb, x, bsize[ib], bsizekb); #else REAL_T alpha = (REAL_T) 1.; REAL_T beta = (REAL_T) 1.; TYPED_FUNC(bml_xsmm_gemm) ("N", "N", &bsize[kb], &bsize[ib], &bsize[jb], &alpha, X_value_right, &bsize[kb], X_value_left, &bsize[jb], &beta, x, &bsize[kb]); #endif } } // Check for number of non-zero blocks per block row exceeded if (lb > MB) { LOG_ERROR ("Number of non-zeroes blocks per row > MB, Increase MB\n"); } int ll = 0; for (int jb = 0; jb < lb; jb++) { assert(ll < MB); int jp = jx[jb]; REAL_T *xtmp = x_ptr[jp]; double normx = TYPED_FUNC(bml_norm_inf_fast) (xtmp, bsize[ib] * bsize[jp]); if (jp == ib || (normx > threshold)) { if (jp == ib) { for (int kk = 0; kk < bsize[ib]; kk++) traceX2 = traceX2 + xtmp[kk * (1 + bsize[jb])]; } int nelements = bsize[ib] * bsize[jp]; int ind = ROWMAJOR(ib, ll, NB, MB); assert(ind < NB * MB); if (X2_ptr_value[ind] == NULL) X2_ptr_value[ind] = TYPED_FUNC(bml_allocate_block_ellblock) (X2, ib, nelements); REAL_T *X2_value = X2_ptr_value[ind]; assert(X2_value != NULL); memcpy(X2_value, xtmp, nelements * sizeof(REAL_T)); X2_indexb[ind] = jp; ll++; } ix[jp] = 0; memset(x_ptr[jp], 0, maxbsize2 * sizeof(REAL_T)); } X2_nnzb[ib] = ll; bml_free_memory(T_value_right); } trace[0] = traceX; trace[1] = traceX2; bml_free_memory(x_ptr_storage); return trace; } /** Matrix multiply. * * \f$ C \leftarrow B \, A \f$ * * \ingroup multiply_group * * \param A Matrix A * \param B Matrix B * \param C Matrix C * \param threshold Used for sparse multiply */ void TYPED_FUNC( bml_multiply_AB_ellblock) ( bml_matrix_ellblock_t * A, bml_matrix_ellblock_t * B, bml_matrix_ellblock_t * C, double threshold) { assert(A->NB == B->NB); assert(A->NB == C->NB); int NB = A->NB; int *A_nnzb = A->nnzb; int *A_indexb = A->indexb; int *bsize = A->bsize; int *B_nnzb = B->nnzb; int *B_indexb = B->indexb; int *C_nnzb = C->nnzb; int *C_indexb = C->indexb; int ix[NB], jx[NB]; REAL_T *x_ptr[NB]; REAL_T **A_ptr_value = (REAL_T **) A->ptr_value; REAL_T **B_ptr_value = (REAL_T **) B->ptr_value; REAL_T **C_ptr_value = (REAL_T **) C->ptr_value; memset(ix, 0, NB * sizeof(int)); memset(jx, 0, NB * sizeof(int)); int maxbsize = 0; for (int ib = 0; ib < NB; ib++) maxbsize = MAX(maxbsize, bsize[ib]); int maxbsize2 = maxbsize * maxbsize; #ifdef _OPENMP int nthreads = omp_get_max_threads(); #else int nthreads = 1; #endif REAL_T *x_ptr_storage = calloc(maxbsize2 * NB * nthreads, sizeof(REAL_T)); char xptrset = 0; void ( *fun_ptr_arr[]) ( REAL_T *, REAL_T *, const int, REAL_T *, const int, const int) = { TYPED_FUNC(bml_multiply_block1), TYPED_FUNC(bml_multiply_block2), TYPED_FUNC(bml_multiply_block3), TYPED_FUNC(bml_multiply_block4), TYPED_FUNC(bml_multiply_block5), TYPED_FUNC(bml_multiply_block6)}; //loop over row blocks #pragma omp parallel for \ firstprivate(ix, jx, x_ptr, xptrset) for (int ib = 0; ib < NB; ib++) { int lb = 0; if (!xptrset) { #ifdef _OPENMP int offset = omp_get_thread_num() * maxbsize2 * NB; #else int offset = 0; #endif for (int i = 0; i < NB; i++) x_ptr[i] = &x_ptr_storage[offset + i * maxbsize2]; xptrset = 1; } REAL_T *T_value_right = bml_noinit_allocate_memory(maxbsize2 * sizeof(REAL_T)); //loop 
over blocks in this block row "ib" for (int jp = 0; jp < A_nnzb[ib]; jp++) { int ind = ROWMAJOR(ib, jp, NB, A->MB); REAL_T *A_value = A_ptr_value[ind]; int jb = A_indexb[ind]; const int bsizejb = bsize[jb]; for (int kp = 0; kp < B_nnzb[jb]; kp++) { int kb = B_indexb[ROWMAJOR(jb, kp, NB, B->MB)]; //compute column block "kb" of result if (ix[kb] == 0) { memset(x_ptr[kb], 0, maxbsize2 * sizeof(REAL_T)); jx[lb] = kb; ix[kb] = ib + 1; lb++; } REAL_T *x = x_ptr[kb]; REAL_T *B_value = B_ptr_value[ROWMAJOR(jb, kp, NB, B->MB)]; // transpose storage for matrix on the right const int bsizekb = bsize[kb]; for (int ii = 0; ii < bsizejb; ii++) { const int offset = ii * bsizekb; for (int jj = 0; jj < bsizekb; jj++) T_value_right[jj * bsizejb + ii] = B_value[offset + jj]; } (*fun_ptr_arr[bsizejb - 1]) (A_value, T_value_right, bsizejb, x, bsize[ib], bsizekb); } } // Check for number of non-zeroes per row exceeded if (lb > C->MB) { LOG_ERROR("Number of non-zeroes per row > M, Increase M\n"); } int ll = 0; for (int jb = 0; jb < lb; jb++) { int jp = jx[jb]; REAL_T *xtmp = x_ptr[jp]; double normx = TYPED_FUNC(bml_norm_inf) (xtmp, bsize[ib], bsize[jp], bsize[jp]); if (jp == ib || (normx > threshold)) { int nelements = bsize[ib] * bsize[jp]; int ind = ROWMAJOR(ib, ll, NB, C->MB); C_ptr_value[ind] = TYPED_FUNC(bml_allocate_block_ellblock) (C, ib, nelements); memcpy(C_ptr_value[ind], xtmp, nelements * sizeof(REAL_T)); C_indexb[ind] = jp; ll++; } ix[jp] = 0; memset(x_ptr[jp], 0, maxbsize2 * sizeof(REAL_T)); } C_nnzb[ib] = ll; bml_free_memory(T_value_right); } free(x_ptr_storage); } /** Matrix multiply with threshold adjustment. * * \f$ C \leftarrow B \, A \f$ * * \ingroup multiply_group * * \param A Matrix A * \param B Matrix B * \param C Matrix C * \param threshold Used for sparse multiply */ void TYPED_FUNC( bml_multiply_adjust_AB_ellblock) ( bml_matrix_ellblock_t * A, bml_matrix_ellblock_t * B, bml_matrix_ellblock_t * C, double threshold) { int NB = A->NB; int MB = A->MB; int *A_nnzb = A->nnzb; int *A_indexb = A->indexb; int *bsize = A->bsize; int *B_nnzb = B->nnzb; int *B_indexb = B->indexb; int *C_nnzb = C->nnzb; int *C_indexb = C->indexb; int ix[NB], jx[NB]; int aflag = 1; REAL_T *x_ptr[NB]; REAL_T **A_ptr_value = (REAL_T **) A->ptr_value; REAL_T **B_ptr_value = (REAL_T **) B->ptr_value; REAL_T **C_ptr_value = (REAL_T **) C->ptr_value; double adjust_threshold = threshold; memset(ix, 0, NB * sizeof(int)); memset(jx, 0, NB * sizeof(int)); int maxbsize = 0; for (int ib = 0; ib < NB; ib++) maxbsize = MAX(maxbsize, bsize[ib]); int maxbsize2 = maxbsize * maxbsize; for (int ib = 0; ib < NB; ib++) x_ptr[ib] = calloc(maxbsize2, sizeof(REAL_T)); while (aflag > 0) { aflag = 0; for (int ib = 0; ib < NB; ib++) { int l = 0; for (int jp = 0; jp < A_nnzb[ib]; jp++) { REAL_T *a = A_ptr_value[ROWMAJOR(ib, jp, NB, MB)]; int jb = A_indexb[ROWMAJOR(ib, jp, NB, MB)]; for (int kp = 0; kp < B_nnzb[jb]; kp++) { int kb = B_indexb[ROWMAJOR(jb, kp, NB, MB)]; if (ix[kb] == 0) { memset(x_ptr[kb], 0, maxbsize2); jx[l] = kb; ix[kb] = ib + 1; l++; } // TEMPORARY STORAGE VECTOR LENGTH FULL N REAL_T *b = B_ptr_value[ROWMAJOR(jb, kp, NB, MB)]; REAL_T *x = x_ptr[kb]; for (int ii = 0; ii < bsize[jb]; ii++) for (int jj = 0; jj < bsize[kb]; jj++) { int k = ii * bsize[kb] + jj; x[k] = x[k] + a[ROWMAJOR(ii, jj, bsize[ib], bsize[jb])] * b[ROWMAJOR(jj, ii, bsize[jb], bsize[kb])]; } } } // Check for number of non-zeroes per row exceeded // Need to adjust threshold if (l > MB) { aflag = 1; } int ll = 0; for (int jb = 0; jb < l; jb++) { int jp = jx[jb]; 
REAL_T *xtmp = x_ptr[jp]; double normx = TYPED_FUNC(bml_norm_inf) (xtmp, bsize[ib], bsize[jp], bsize[jp]); // Diagonal elements are saved in first column if (jp == ib) { memcpy(C_ptr_value[ROWMAJOR(ib, ll, NB, MB)], xtmp, bsize[ib] * bsize[ll] * sizeof(REAL_T)); C_indexb[ROWMAJOR(ib, ll, NB, MB)] = jp; ll++; } else if (normx > adjust_threshold) { memcpy(C_ptr_value[ROWMAJOR(ib, ll, NB, MB)], xtmp, bsize[ib] * bsize[ll] * sizeof(REAL_T)); C_indexb[ROWMAJOR(ib, ll, NB, MB)] = jp; ll++; } ix[jp] = 0; memset(x_ptr[jp], 0, maxbsize2 * sizeof(REAL_T)); } C_nnzb[ib] = ll; } adjust_threshold *= 2.0; } }
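/*
 * Illustrative sketch (editor's addition, hypothetical names): the
 * bml_multiply_blockN kernels above accumulate x += A * B for one pair of
 * blocks, with the right-hand block passed in transposed storage so that
 * the inner dot product runs with unit stride.  A double-precision rewrite
 * of the 2x2 case, checked against a hand-computed product:
 */
#include <assert.h>

static void block2_demo(const double *a, const double *bt, int ld,
                        double *x, int ni, int nk)
{
    double *xk = x;
    for (int ii = 0; ii < ni; ii++)
    {
        const double *arow = a + ld * ii;
        const double *b = bt;
        for (int jj = 0; jj < nk; jj++)
        {
            *xk += arow[0] * b[0] + arow[1] * b[1];
            xk++;
            b += ld;
        }
    }
}

static void block2_selftest(void)
{
    double A[4] = { 1, 2, 3, 4 };   /* A = [1 2; 3 4], row-major */
    double Bt[4] = { 5, 7, 6, 8 };  /* B = [5 6; 7 8], stored transposed */
    double X[4] = { 0, 0, 0, 0 };
    block2_demo(A, Bt, 2, X, 2, 2);
    /* A*B = [19 22; 43 50] */
    assert(X[0] == 19 && X[1] == 22 && X[2] == 43 && X[3] == 50);
}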
Par-08-ParallelForParallelFor.c
int main(int argc, char **argv)
{
    int a[4] = {1, 2, 3, 4};
    int b[4] = {0, 0, 0, 0};

#pragma omp parallel
    {
#pragma omp for
        for (int i = 0; i < 4; ++i)
        {
            a[i] = 3 * a[i];
        }
    }

#pragma omp parallel for
    for (int j = 0; j < 4; ++j)
    {
        b[j] += a[j];
    }

    return 0;
}
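/*
 * Illustrative note (editor's addition): the two constructs above are
 * equivalent; "#pragma omp parallel for" is shorthand for a parallel region
 * containing a single worksharing loop.  A hypothetical variant that fuses
 * the two loops and checks the result:
 */
#include <assert.h>

static void fused_demo(void)
{
    int a[4] = {1, 2, 3, 4};
    int b[4] = {0, 0, 0, 0};
#pragma omp parallel for
    for (int i = 0; i < 4; ++i)
    {
        a[i] = 3 * a[i];
        b[i] += a[i];
    }
    for (int i = 0; i < 4; ++i)
        assert(b[i] == 3 * (i + 1));
}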
pi_task.c
/*
This program will numerically compute the integral of

                  4/(1+x*x)

from 0 to 1.  The value of this integral is pi -- which is great
since it gives us an easy way to check the answer.

This version of the program uses a divide and conquer algorithm
with tasks and taskwait.

History: Written by Tim Mattson, 10/2013
*/
#include <omp.h>
#include <stdio.h>

static long num_steps = 1024 * 1024 * 1024;
#define MIN_BLK (1024 * 1024 * 256)
#define MAX 4

double pi_comp(int Nstart, int Nfinish, double step)
{
    int i, iblk;
    double x, sum = 0.0, sum1, sum2;

    if (Nfinish - Nstart < MIN_BLK)
    {
        for (i = Nstart; i < Nfinish; i++)
        {
            x = (i + 0.5) * step;
            sum = sum + 4.0 / (1.0 + x * x);
        }
    }
    else
    {
        iblk = Nfinish - Nstart;
#pragma omp task shared(sum1)
        sum1 = pi_comp(Nstart, Nfinish - iblk / 2, step);
#pragma omp task shared(sum2)
        sum2 = pi_comp(Nfinish - iblk / 2, Nfinish, step);
#pragma omp taskwait
        sum = sum1 + sum2;
    }
    return sum;
}

int main()
{
    int j;
    double step, pi, sum;
    double init_time, final_time;

    step = 1.0 / (double) num_steps;

    for (j = 1; j <= MAX; j++)
    {
        omp_set_num_threads(j);
        init_time = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                printf("num threads=%d", omp_get_num_threads());
                sum = pi_comp(0, num_steps, step);
            }
        }
        pi = step * sum;
        final_time = omp_get_wtime() - init_time;
        printf(" for %ld steps pi = %f in %f secs\n",
               num_steps, pi, final_time);
    }
    return 0;
}
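/*
 * Illustrative sketch (editor's addition, hypothetical name): stripped of
 * the OpenMP task pragmas, the divide-and-conquer recursion above reduces
 * to a plain midpoint-rule loop, which is handy for checking the parallel
 * result.  Note that with num_steps = 2^30 and MIN_BLK = 2^28, the
 * recursion splits until a block is strictly smaller than MIN_BLK, i.e. it
 * bottoms out in eight leaf tasks of 2^27 iterations each.
 */
double pi_serial(long n)
{
    double step = 1.0 / (double) n;
    double sum = 0.0;
    for (long i = 0; i < n; i++)
    {
        double x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    return step * sum;
}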
GB_binop__pow_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint64) // C=scalar+B GB (_bind1st__pow_uint64) // C=scalar+B' GB (_bind1st_tran__pow_uint64) // C=A+scalar GB (_bind2nd__pow_uint64) // C=A'+scalar GB (_bind2nd_tran__pow_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_pow_uint64 (aij, bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint64 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT64 || GxB_NO_POW_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint64 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint64 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint64 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint64 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
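/*
 * Illustrative sketch (an addition, not part of the generated kernels above):
 * the _bind1st and _bind2nd kernels share one pattern -- fix one operand of
 * the binary operator z = x^y to a scalar, stream the other operand from the
 * matrix values, and skip entries the bitmap marks as absent.  The standalone
 * C sketch below shows that pattern.  The pow_u64 helper is a hypothetical
 * stand-in for GB_pow_uint64 (unsigned wrap-around semantics assumed, with
 * x^0 == 1), and the explicit bitmap test stands in for the GBB/GBX macros;
 * the real kernels also parallelize this loop with OpenMP.
 */

#include <stdint.h>

/* hypothetical helper: z = x^y by binary exponentiation, wrapping mod 2^64 */
static uint64_t pow_u64 (uint64_t x, uint64_t y)
{
    uint64_t z = 1 ;
    while (y > 0)
    {
        if (y & 1) z *= x ;
        x *= x ;
        y >>= 1 ;
    }
    return (z) ;
}

/* bind2nd pattern: Cx [p] = Ax [p] ^ y for each entry present in A */
static void bind2nd_pow_u64 (uint64_t *Cx, const uint64_t *Ax,
    const int8_t *Ab, int64_t anz, uint64_t y)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present */
        Cx [p] = pow_u64 (Ax [p], y) ;
    }
}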
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. 
    SourceLocation KeywordLoc;
  };

  //===--- Expression bitfields classes ---===//

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned /*ExprDependence*/ Dependent : ExprDependenceBits;
  };
  enum { NumExprBits = NumStmtBits + 5 + ExprDependenceBits };

  class ConstantExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class ConstantExpr;

    unsigned : NumExprBits;

    /// The kind of result that is trail-allocated.
    unsigned ResultKind : 2;

    /// Kind of Result as defined by APValue::Kind
    unsigned APValueKind : 4;

    /// When ResultKind == RSK_Int64, whether the trail-allocated integer is
    /// signed.
    unsigned IsUnsigned : 1;

    /// When ResultKind == RSK_Int64, the BitWidth of the trail-allocated
    /// integer. 7 bits because it is the minimal number of bits to represent a
    /// value from 0 to 64 (the size of the trail-allocated number).
    unsigned BitWidth : 7;

    /// When ResultKind == RSK_APValue, whether the ASTContext will cleanup the
    /// destructor on the trail-allocated APValue.
    unsigned HasCleanup : 1;

    /// Whether this ConstantExpr was created for immediate invocation.
    unsigned IsImmediateInvocation : 1;
  };

  class PredefinedExprBitfields {
    friend class ASTStmtReader;
    friend class PredefinedExpr;

    unsigned : NumExprBits;

    /// The kind of this PredefinedExpr. One of the enumeration values
    /// in PredefinedExpr::IdentKind.
    unsigned Kind : 4;

    /// True if this PredefinedExpr has a trailing "StringLiteral *"
    /// for the predefined identifier.
    unsigned HasFunctionName : 1;

    /// The location of this PredefinedExpr.
    SourceLocation Loc;
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
    unsigned NonOdrUseReason : 2;

    /// The location of the declaration name itself.
    SourceLocation Loc;
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class StringLiteralBitfields {
    friend class ASTStmtReader;
    friend class StringLiteral;

    unsigned : NumExprBits;

    /// The kind of this string literal.
    /// One of the enumeration values of StringLiteral::StringKind.
    unsigned Kind : 3;

    /// The width of a single character in bytes. Only values of 1, 2,
    /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
    /// the target + string kind to the appropriate CharByteWidth.
    unsigned CharByteWidth : 3;

    unsigned IsPascal : 1;

    /// The number of concatenated tokens this string is made of.
    /// This is the number of trailing SourceLocation.
    unsigned NumConcatenated;
  };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  class UnaryOperatorBitfields {
    friend class UnaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 5;
    unsigned CanOverflow : 1;

    SourceLocation Loc;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class ArraySubscriptExprBitfields {
    friend class ArraySubscriptExpr;

    unsigned : NumExprBits;

    SourceLocation RBracketLoc;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;

    /// True if the callee of the call expression was found using ADL.
    unsigned UsesADL : 1;

    /// Padding used to align OffsetToTrailingObjects to a byte multiple.
    unsigned : 24 - 2 - NumExprBits;

    /// The offset in bytes from the this pointer to the start of the
    /// trailing objects belonging to CallExpr. Intentionally byte sized
    /// for faster access.
    unsigned OffsetToTrailingObjects : 8;
  };
  enum { NumCallExprBits = 32 };

  class MemberExprBitfields {
    friend class ASTStmtReader;
    friend class MemberExpr;

    unsigned : NumExprBits;

    /// IsArrow - True if this is "X->F", false if this is "X.F".
    unsigned IsArrow : 1;

    /// True if this member expression used a nested-name-specifier to
    /// refer to the member, e.g., "x->Base::f", or found its member via
    /// a using declaration. When true, a MemberExprNameQualifier
    /// structure is allocated immediately after the MemberExpr.
    unsigned HasQualifierOrFoundDecl : 1;

    /// True if this member expression specified a template keyword
    /// and/or a template argument list explicitly, e.g., x->f<int>,
    /// x->template f, x->template f<int>.
    /// When true, an ASTTemplateKWAndArgsInfo structure and its
    /// TemplateArguments (if any) are present.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// True if this member expression refers to a method that
    /// was resolved from an overloaded set having size greater than 1.
    unsigned HadMultipleCandidates : 1;

    /// Value of type NonOdrUseReason indicating why this MemberExpr does
    /// not constitute an odr-use of the named declaration. Meaningful only
    /// when naming a static member.
    unsigned NonOdrUseReason : 2;

    /// This is the location of the -> or . in the expression.
    SourceLocation OperatorLoc;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

    /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
    /// here. ([implimits] Direct and indirect base classes [16384]).
    unsigned BasePathSize;
  };

  class BinaryOperatorBitfields {
    friend class BinaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 6;

    /// This is only meaningful for operations on floating point
    /// types and 0 otherwise.
    unsigned FPFeatures : 8;

    SourceLocation OpLoc;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class ParenListExprBitfields {
    friend class ASTStmtReader;
    friend class ParenListExpr;

    unsigned : NumExprBits;

    /// The number of expressions in the paren list.
    unsigned NumExprs;
  };

  class GenericSelectionExprBitfields {
    friend class ASTStmtReader;
    friend class GenericSelectionExpr;

    unsigned : NumExprBits;

    /// The location of the "_Generic".
    SourceLocation GenericLoc;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class SourceLocExprBitfields {
    friend class ASTStmtReader;
    friend class SourceLocExpr;

    unsigned : NumExprBits;

    /// The kind of source location builtin represented by the SourceLocExpr.
    /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
    unsigned Kind : 2;
  };

  class StmtExprBitfields {
    friend class ASTStmtReader;
    friend class StmtExpr;

    unsigned : NumExprBits;

    /// The number of levels of template parameters enclosing this statement
    /// expression. Used to determine if a statement expression remains
    /// dependent after instantiation.
    unsigned TemplateDepth;
  };

  //===--- C++ Expression bitfields classes ---===//

  class CXXOperatorCallExprBitfields {
    friend class ASTStmtReader;
    friend class CXXOperatorCallExpr;

    unsigned : NumCallExprBits;

    /// The kind of this overloaded operator. One of the enumerator
    /// values of OverloadedOperatorKind.
    unsigned OperatorKind : 6;

    // Only meaningful for floating point types.
    unsigned FPFeatures : 8;
  };

  class CXXRewrittenBinaryOperatorBitfields {
    friend class ASTStmtReader;
    friend class CXXRewrittenBinaryOperator;

    unsigned : NumCallExprBits;

    unsigned IsReversed : 1;
  };

  class CXXBoolLiteralExprBitfields {
    friend class CXXBoolLiteralExpr;

    unsigned : NumExprBits;

    /// The value of the boolean literal.
    unsigned Value : 1;

    /// The location of the boolean literal.
    SourceLocation Loc;
  };

  class CXXNullPtrLiteralExprBitfields {
    friend class CXXNullPtrLiteralExpr;

    unsigned : NumExprBits;

    /// The location of the null pointer literal.
    SourceLocation Loc;
  };

  class CXXThisExprBitfields {
    friend class CXXThisExpr;

    unsigned : NumExprBits;

    /// Whether this is an implicit "this".
    unsigned IsImplicit : 1;

    /// The location of the "this".
    SourceLocation Loc;
  };

  class CXXThrowExprBitfields {
    friend class ASTStmtReader;
    friend class CXXThrowExpr;

    unsigned : NumExprBits;

    /// Whether the thrown variable (if any) is in scope.
    unsigned IsThrownVariableInScope : 1;

    /// The location of the "throw".
    SourceLocation ThrowLoc;
  };

  class CXXDefaultArgExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultArgExpr;

    unsigned : NumExprBits;

    /// The location where the default argument expression was used.
    SourceLocation Loc;
  };

  class CXXDefaultInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultInitExpr;

    unsigned : NumExprBits;

    /// The location where the default initializer expression was used.
    SourceLocation Loc;
  };

  class CXXScalarValueInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXScalarValueInitExpr;

    unsigned : NumExprBits;

    SourceLocation RParenLoc;
  };

  class CXXNewExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class CXXNewExpr;

    unsigned : NumExprBits;

    /// Was the usage ::new, i.e. is the global new to be used?
    unsigned IsGlobalNew : 1;

    /// Do we allocate an array?
If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. /// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. 
    SourceLocation OperatorLoc;
  };

  class OverloadExprBitfields {
    friend class ASTStmtReader;
    friend class OverloadExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// Padding used by the derived classes to store various bits. If you
    /// need to add some data here, shrink this padding and add your data
    /// above. NumOverloadExprBits also needs to be updated.
    unsigned : 32 - NumExprBits - 1;

    /// The number of results.
    unsigned NumResults;
  };
  enum { NumOverloadExprBits = NumExprBits + 1 };

  class UnresolvedLookupExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedLookupExpr;

    unsigned : NumOverloadExprBits;

    /// True if these lookup results should be extended by
    /// argument-dependent lookup if this is the operand of a function call.
    unsigned RequiresADL : 1;

    /// True if these lookup results are overloaded. This is pretty trivially
    /// rederivable if we urgently need to kill this field.
    unsigned Overloaded : 1;
  };
  static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
                "UnresolvedLookupExprBitfields must be <= 4 bytes to "
                "avoid trashing OverloadExprBitfields::NumResults!");

  class UnresolvedMemberExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedMemberExpr;

    unsigned : NumOverloadExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether the lookup results contain an unresolved using declaration.
    unsigned HasUnresolvedUsing : 1;
  };
  static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
                "UnresolvedMemberExprBitfields must be <= 4 bytes to "
                "avoid trashing OverloadExprBitfields::NumResults!");

  class CXXNoexceptExprBitfields {
    friend class ASTStmtReader;
    friend class CXXNoexceptExpr;

    unsigned : NumExprBits;

    unsigned Value : 1;
  };

  class SubstNonTypeTemplateParmExprBitfields {
    friend class ASTStmtReader;
    friend class SubstNonTypeTemplateParmExpr;

    unsigned : NumExprBits;

    /// The location of the non-type template parameter reference.
    SourceLocation NameLoc;
  };

  class RequiresExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class RequiresExpr;

    unsigned : NumExprBits;

    unsigned IsSatisfied : 1;
    SourceLocation RequiresKWLoc;
  };

  //===--- C++ Coroutines TS bitfields classes ---===//

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  //===--- Obj-C Expression bitfields classes ---===//

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  //===--- Clang Extensions bitfields classes ---===//

  class OpaqueValueExprBitfields {
    friend class ASTStmtReader;
    friend class OpaqueValueExpr;

    unsigned : NumExprBits;

    /// The OVE is a unique semantic reference to its source expression if this
    /// bit is set to true.
    unsigned IsUnique : 1;

    SourceLocation Loc;
  };

  union {
    // Same order as in StmtNodes.td.
// Statements StmtBitfields StmtBits; NullStmtBitfields NullStmtBits; CompoundStmtBitfields CompoundStmtBits; LabelStmtBitfields LabelStmtBits; AttributedStmtBitfields AttributedStmtBits; IfStmtBitfields IfStmtBits; SwitchStmtBitfields SwitchStmtBits; WhileStmtBitfields WhileStmtBits; DoStmtBitfields DoStmtBits; ForStmtBitfields ForStmtBits; GotoStmtBitfields GotoStmtBits; ContinueStmtBitfields ContinueStmtBits; BreakStmtBitfields BreakStmtBits; ReturnStmtBitfields ReturnStmtBits; SwitchCaseBitfields SwitchCaseBits; // Expressions ExprBitfields ExprBits; ConstantExprBitfields ConstantExprBits; PredefinedExprBitfields PredefinedExprBits; DeclRefExprBitfields DeclRefExprBits; FloatingLiteralBitfields FloatingLiteralBits; StringLiteralBitfields StringLiteralBits; CharacterLiteralBitfields CharacterLiteralBits; UnaryOperatorBitfields UnaryOperatorBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; ArraySubscriptExprBitfields ArraySubscriptExprBits; CallExprBitfields CallExprBits; MemberExprBitfields MemberExprBits; CastExprBitfields CastExprBits; BinaryOperatorBitfields BinaryOperatorBits; InitListExprBitfields InitListExprBits; ParenListExprBitfields ParenListExprBits; GenericSelectionExprBitfields GenericSelectionExprBits; PseudoObjectExprBitfields PseudoObjectExprBits; SourceLocExprBitfields SourceLocExprBits; // GNU Extensions. StmtExprBitfields StmtExprBits; // C++ Expressions CXXOperatorCallExprBitfields CXXOperatorCallExprBits; CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits; CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits; CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits; CXXThisExprBitfields CXXThisExprBits; CXXThrowExprBitfields CXXThrowExprBits; CXXDefaultArgExprBitfields CXXDefaultArgExprBits; CXXDefaultInitExprBitfields CXXDefaultInitExprBits; CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits; CXXNewExprBitfields CXXNewExprBits; CXXDeleteExprBitfields CXXDeleteExprBits; TypeTraitExprBitfields TypeTraitExprBits; DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits; CXXConstructExprBitfields CXXConstructExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits; CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits; OverloadExprBitfields OverloadExprBits; UnresolvedLookupExprBitfields UnresolvedLookupExprBits; UnresolvedMemberExprBitfields UnresolvedMemberExprBits; CXXNoexceptExprBitfields CXXNoexceptExprBits; SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits; RequiresExprBitfields RequiresExprBits; // C++ Coroutines TS expressions CoawaitExprBitfields CoawaitBits; // Obj-C Expressions ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; // Clang Extensions OpaqueValueExprBitfields OpaqueValueExprBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. 
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast_or_null<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt() = delete; Stmt(const Stmt &) = delete; Stmt(Stmt &&) = delete; Stmt &operator=(const Stmt &) = delete; Stmt &operator=(Stmt &&) = delete; Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. 
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {} /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } const_child_range children() const { auto Children = const_cast<DeclStmt *>(this)->children(); return const_child_range(Children); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass) { NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro; setSemiLoc(L); } /// Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {} SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; } void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; } bool hasLeadingEmptyMacro() const { return NullStmtBits.HasLeadingEmptyMacro; } SourceLocation getBeginLoc() const { return getSemiLoc(); } SourceLocation getEndLoc() const { return getSemiLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. class CompoundStmt final : public Stmt, private llvm::TrailingObjects<CompoundStmt, Stmt *> { friend class ASTStmtReader; friend TrailingObjects; /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits. 
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allows ranges in case statements of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc); /// Build an empty case statement. static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange); /// True if this case statement is of the form case LHS ... RHS, which /// is a GNU extension. In this case the RHS can be obtained with getRHS() /// and the location of the ellipsis can be obtained with getEllipsisLoc(). bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; } SourceLocation getCaseLoc() const { return getKeywordLoc(); } void setCaseLoc(SourceLocation L) { setKeywordLoc(L); } /// Get the location of the ... in a case statement of the form LHS ... RHS. SourceLocation getEllipsisLoc() const { return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>() : SourceLocation(); } /// Set the location of the ... in a case statement of the form LHS ... RHS. /// Assert that this case statement is of this form. void setEllipsisLoc(SourceLocation L) { assert( caseStmtIsGNURange() && "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!"); *getTrailingObjects<SourceLocation>() = L; } Expr *getLHS() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } const Expr *getLHS() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } void setLHS(Expr *Val) { getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val); } Expr *getRHS() { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } const Expr *getRHS() const { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } void setRHS(Expr *Val) { assert(caseStmtIsGNURange() && "setRHS but this is not a case stmt of the form LHS ... RHS!"); getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val); } Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } const Stmt *getSubStmt() const { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } void setSubStmt(Stmt *S) { getTrailingObjects<Stmt *>()[subStmtOffset()] = S; } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; class DefaultStmt : public SwitchCase { Stmt *SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// Build an empty default statement. 
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement.
///
/// For example:
///   [[omp::for(...)]] for (...) { ...
}
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would change the order of the
  // children. The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else); /// Build an empty if/then/else statement. explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit); public: /// Create an IfStmt. static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL = SourceLocation(), Stmt *Else = nullptr); /// Create an empty IfStmt optionally with storage for an else statement, /// condition variable and init expression. static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar, bool HasInit); /// True if this IfStmt has the storage for an init statement. bool hasInitStorage() const { return IfStmtBits.HasInit; } /// True if this IfStmt has storage for a variable declaration. bool hasVarStorage() const { return IfStmtBits.HasVar; } /// True if this IfStmt has storage for an else statement. bool hasElseStorage() const { return IfStmtBits.HasElse; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; } const Stmt *getThen() const { return getTrailingObjects<Stmt *>()[thenOffset()]; } void setThen(Stmt *Then) { getTrailingObjects<Stmt *>()[thenOffset()] = Then; } Stmt *getElse() { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } const Stmt *getElse() const { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } void setElse(Stmt *Else) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); getTrailingObjects<Stmt *>()[elseOffset()] = Else; } /// Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<IfStmt *>(this)->getConditionVariable(); } /// Set the condition variable for this if statement. /// The if statement must have storage for the condition variable. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This if statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; } void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; } SourceLocation getElseLoc() const { return hasElseStorage() ? 
*getTrailingObjects<SourceLocation>() : SourceLocation(); } void setElseLoc(SourceLocation ElseLoc) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); *getTrailingObjects<SourceLocation>() = ElseLoc; } bool isConstexpr() const { return IfStmtBits.IsConstexpr; } void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; } /// If this is an 'if constexpr', determine which substatement will be taken. /// Otherwise, or if the condition is value-dependent, returns None. Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const; bool isObjCAvailabilityCheck() const; SourceLocation getBeginLoc() const { return getIfLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { if (getElse()) return getElse()->getEndLoc(); return getThen()->getEndLoc(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. class SwitchStmt final : public Stmt, private llvm::TrailingObjects<SwitchStmt, Stmt *> { friend TrailingObjects; /// Points to a linked list of case and default statements. SwitchCase *FirstCase; // SwitchStmt is followed by several trailing objects, // some of which are optional. Note that it would be more convenient to // put the optional trailing objects at the end but this would change // the order in children(). // The trailing objects are in order: // // * A "Stmt *" for the init statement. // Present if and only if hasInitStorage(). // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. enum { InitOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } /// Build a switch statement. SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Build an empty switch statement. explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar); public: /// Create a switch statement. static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Create an empty switch statement optionally with storage for /// an init expression and a condition variable. static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit, bool HasVar); /// True if this SwitchStmt has storage for an init statement. bool hasInitStorage() const { return SwitchStmtBits.HasInit; } /// True if this SwitchStmt has storage for a condition variable.
bool hasVarStorage() const { return SwitchStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This switch statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } /// Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<SwitchStmt *>(this)->getConditionVariable(); } /// Set the condition variable in this switch statement. /// The switch statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *VD); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SwitchCase *getSwitchCaseList() { return FirstCase; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { setBody(S); setSwitchLoc(SL); } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return SwitchStmtBits.AllEnumCasesCovered; } SourceLocation getBeginLoc() const { return getSwitchLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody() ? 
getBody()->getEndLoc() : reinterpret_cast<const Stmt *>(getCond())->getEndLoc(); } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. class WhileStmt final : public Stmt, private llvm::TrailingObjects<WhileStmt, Stmt *> { friend TrailingObjects; // WhileStmt is followed by several trailing objects, // some of which are optional. Note that it would be more // convenient to put the optional trailing object at the end // but this would affect children(). // The trailing objects are in order: // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. // enum { VarOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned varOffset() const { return VarOffset; } unsigned condOffset() const { return VarOffset + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasVarStorage(); } /// Build a while statement. WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Build an empty while statement. explicit WhileStmt(EmptyShell Empty, bool HasVar); public: /// Create a while statement. static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Create an empty while statement optionally with storage for /// a condition variable. static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar); /// True if this WhileStmt has storage for a condition variable. bool hasVarStorage() const { return WhileStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } /// Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<WhileStmt *>(this)->getConditionVariable(); } /// Set the condition variable of this while statement. /// The while statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ?
static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; } void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; } SourceLocation getBeginLoc() const { return getWhileLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; /// DoStmt - This represents a 'do/while' stmt. class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt *SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) { setCond(Cond); setBody(Body); setDoLoc(DL); } /// Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(SubExprs[COND]); } void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *Body) { SubExprs[BODY] = Body; } SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; } void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getDoLoc(); } SourceLocation getEndLoc() const { return getRParenLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// GotoStmt - This represents a direct goto. class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) { setGotoLoc(GL); } /// Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const { return getLabelLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. class IndirectGotoStmt : public Stmt { SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) { setTarget(target); setGotoLoc(gotoLoc); } /// Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr *>(Target); } const Expr *getTarget() const { return reinterpret_cast<const Expr *>(Target); } void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt *>(this)->getConstantTarget(); } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target + 1); } const_child_range children() const { return const_child_range(&Target, &Target + 1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) { setContinueLoc(CL); } /// Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; } SourceLocation getBeginLoc() const { return getContinueLoc(); } SourceLocation getEndLoc() const { return getContinueLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); } /// Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; } void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; } SourceLocation getBeginLoc() const { return getBreakLoc(); } SourceLocation getEndLoc() const { return getBreakLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. class ReturnStmt final : public Stmt, private llvm::TrailingObjects<ReturnStmt, const VarDecl *> { friend TrailingObjects; /// The return expression. Stmt *RetExpr; // ReturnStmt is followed optionally by a trailing "const VarDecl *" // for the NRVO candidate. 
Present if and only if hasNRVOCandidate(). /// True if this ReturnStmt has storage for an NRVO candidate. bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; } unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const { return hasNRVOCandidate(); } /// Build a return statement. ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Build an empty return statement. explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate); public: /// Create a return statement. static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Create an empty return statement, optionally with /// storage for an NRVO candidate. static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate); Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); } const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); } void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>() : nullptr; } /// Set the variable that might be used for the named return value /// optimization. The return statement must have storage for it, /// which is the case if and only if hasNRVOCandidate() is true. void setNRVOCandidate(const VarDecl *Var) { assert(hasNRVOCandidate() && "This return statement has no storage for an NRVO candidate!"); *getTrailingObjects<const VarDecl *>() = Var; } SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; } void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; } SourceLocation getBeginLoc() const { return getReturnLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RetExpr ? RetExpr->getEndLoc() : getReturnLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr + 1); return child_range(child_iterator(), child_iterator()); } const_child_range children() const { if (RetExpr) return const_child_range(&RetExpr, &RetExpr + 1); return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. 
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. 
using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; unsigned NumLabels = 0; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, unsigned numlabels, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. 
This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles /// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } //===--- Labels ---===// bool isAsmGoto() const { return NumLabels > 0; } unsigned getNumLabels() const { return NumLabels; } IdentifierInfo *getLabelIdentifier(unsigned i) const { return Names[i + NumOutputs + NumInputs]; } AddrLabelExpr *getLabelExpr(unsigned i) const; StringRef getLabelName(unsigned i) const; using labels_iterator = CastIterator<AddrLabelExpr>; using const_labels_iterator = ConstCastIterator<AddrLabelExpr>; using labels_range = llvm::iterator_range<labels_iterator>; using labels_const_range = llvm::iterator_range<const_labels_iterator>; labels_iterator begin_labels() { return &Exprs[0] + NumOutputs + NumInputs; } labels_iterator end_labels() { return &Exprs[0] + NumOutputs + NumInputs + NumLabels; } labels_range labels() { return labels_range(begin_labels(), end_labels()); } const_labels_iterator begin_labels() const { return &Exprs[0] + NumOutputs + NumInputs; } const_labels_iterator end_labels() const { return &Exprs[0] + NumOutputs + NumInputs + NumLabels; } labels_const_range labels() const { return labels_const_range(begin_labels(), end_labels()); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, unsigned NumLabels, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit 
SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } const_child_range children() const { return const_child_range(&Block, &Block + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variables captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
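// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the header above). Every statement
// class in this file exposes a generic children() range, so a statement tree
// can be walked without naming each concrete subclass. A minimal recursive
// count of ReturnStmt nodes, assuming the usual Clang/LLVM headers are
// available; the helper name countReturns is hypothetical.
#include "clang/AST/Stmt.h"
#include "llvm/Support/Casting.h"

static unsigned countReturns(const clang::Stmt *S) {
  // children() ranges may contain null entries (e.g. a ForStmt with an
  // omitted init), so guard before recursing.
  if (!S)
    return 0;
  unsigned N = llvm::isa<clang::ReturnStmt>(S) ? 1 : 0;
  for (const clang::Stmt *Child : S->children())
    N += countReturns(Child);
  return N;
}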
nr_numint.c
/* * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include "cint.h" #include "vhf/fblas.h" #include <assert.h> #define BOXSIZE 64 #define MIN(X,Y) ((X)>(Y)?(Y):(X)) static void dot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const char TRANS_N = 'N'; const double D0 = 0; const double D1 = 1; if (nao <= BOXSIZE) { dgemm_(&TRANS_N, &TRANS_N, &nocc, &ngrids, &nao, &D1, dm, &nocc, ao, &nao, &D0, vm, &nocc); return; } char empty[nbas]; int nbox = (int)((nao-1)/BOXSIZE) + 1; int box_id, bas_id, nd, b0, blen; int ao_id = 0; for (box_id = 0; box_id < nbox; box_id++) { empty[box_id] = 1; } box_id = 0; b0 = BOXSIZE; for (bas_id = 0; bas_id < nbas; bas_id++) { nd = (bas[ANG_OF] * 2 + 1) * bas[NCTR_OF]; assert(nd < BOXSIZE); ao_id += nd; empty[box_id] &= !non0table[bas_id]; if (ao_id == b0) { box_id++; b0 += BOXSIZE; } else if (ao_id > b0) { box_id++; b0 += BOXSIZE; empty[box_id] = !non0table[bas_id]; } bas += BAS_SLOTS; } memset(vm, 0, sizeof(double) * ngrids * nocc); for (box_id = 0; box_id < nbox; box_id++) { if (!empty[box_id]) { b0 = box_id * BOXSIZE; blen = MIN(nao-b0, BOXSIZE); dgemm_(&TRANS_N, &TRANS_N, &nocc, &ngrids, &blen, &D1, dm+b0*nocc, &nocc, ao+b0, &nao, &D1, vm, &nocc); } } } /* vm[ngrids,nocc] = ao[ngrids,i] * dm[i,nocc] */ void VXCdot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int blksize, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const int nblk = (ngrids+blksize-1) / blksize; int ip, ib; #pragma omp parallel default(none) \ shared(vm, ao, dm, nao, nocc, ngrids, blksize, non0table, \ atm, natm, bas, nbas, env) \ private(ip, ib) #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * blksize; dot_ao_dm(vm+ip*nocc, ao+ip*nao, dm, nao, nocc, MIN(ngrids-ip, blksize), non0table+ib*nbas, atm, natm, bas, nbas, env); } } static void dot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; if (nao <= BOXSIZE) { dgemm_(&TRANS_N, &TRANS_T, &nao, &nao, &ngrids, &D1, ao2, &nao, ao1, &nao, &D1, vv, &nao); return; } char empty[nbas]; int nbox = (int)((nao-1)/BOXSIZE) + 1; int box_id, bas_id, nd, b0; int ao_id = 0; for (box_id = 0; box_id < nbox; box_id++) { empty[box_id] = 1; } box_id = 0; b0 = BOXSIZE; for (bas_id = 0; bas_id < nbas; bas_id++) { nd = (bas[ANG_OF] * 2 + 1) * bas[NCTR_OF]; assert(nd < BOXSIZE); ao_id += nd; empty[box_id] &= !non0table[bas_id]; if (ao_id == b0) { box_id++; b0 += BOXSIZE; } else if (ao_id > b0) { box_id++; b0 += BOXSIZE; empty[box_id] = !non0table[bas_id]; } bas += BAS_SLOTS; } int ib, jb, b0i, b0j, leni, lenj; for (ib = 0; ib < nbox; ib++) { if (!empty[ib]) { b0i = ib * BOXSIZE; leni = MIN(nao-b0i, BOXSIZE); for (jb = 0; jb < nbox; jb++) { if (!empty[jb]) { b0j = jb * BOXSIZE; lenj = MIN(nao-b0j, BOXSIZE); dgemm_(&TRANS_N, &TRANS_T, &lenj, &leni, &ngrids, &D1, ao2+b0j, &nao, ao1+b0i, &nao, &D1, vv+b0i*nao+b0j, &nao); } } } } } /* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */ void VXCdot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int blksize, char *non0table, int *atm, int natm, int *bas, int nbas, double *env) { const int nblk = (ngrids+blksize-1) / blksize; int ip, ib; double *v_priv; memset(vv, 0, sizeof(double) * nao * nao); #pragma omp parallel 
default(none) \ shared(vv, ao1, ao2, nao, ngrids, blksize, non0table, \ atm, natm, bas, nbas, env) \ private(ip, ib, v_priv) { v_priv = calloc(nao*nao, sizeof(double)); #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * blksize; dot_ao_ao(v_priv, ao1+ip*nao, ao2+ip*nao, nao, MIN(ngrids-ip, blksize), non0table+ib*nbas, atm, natm, bas, nbas, env); } #pragma omp critical { for (ip = 0; ip < nao*nao; ip++) { vv[ip] += v_priv[ip]; } } free(v_priv); } }
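/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the original file). The screening logic
 * above partitions the nao AO indices into BOXSIZE-wide boxes so that whole
 * boxes of zero basis values can be skipped before calling dgemm_. The
 * offset/length arithmetic in isolation, with hypothetical example sizes
 * (BOX mirrors BOXSIZE, nao = 150 is arbitrary): */
#include <stdio.h>

int main(void)
{
        const int BOX = 64;
        const int nao = 150;
        int nbox = (nao - 1) / BOX + 1;  /* ceil(nao/BOX) = 3 boxes */
        int ib;
        for (ib = 0; ib < nbox; ib++) {
                int b0 = ib * BOX;                            /* box offset */
                int blen = (nao - b0 < BOX) ? nao - b0 : BOX; /* MIN(nao-b0, BOX) */
                printf("box %d: b0=%d blen=%d\n", ib, b0, blen);
        }
        return 0;  /* prints [0,64), [64,128), [128,150) */
}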
Pragma.h
//===- Pragma.h - Pragma registration and handling --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the PragmaHandler and PragmaTable interfaces. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LEX_PRAGMA_H #define LLVM_CLANG_LEX_PRAGMA_H #include "clang/Basic/LLVM.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include <string> namespace clang { class PragmaNamespace; class Preprocessor; class Token; /** * Describes how the pragma was introduced, e.g., with \#pragma, * _Pragma, or __pragma. */ enum PragmaIntroducerKind { /** * The pragma was introduced via \#pragma. */ PIK_HashPragma, /** * The pragma was introduced via the C99 _Pragma(string-literal). */ PIK__Pragma, /** * The pragma was introduced via the Microsoft * __pragma(token-string). */ PIK___pragma }; /// PragmaHandler - Instances of this interface are defined to handle the various /// pragmas that the language front-end uses. Each handler optionally has a /// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with /// that identifier is found. If a handler does not match any of the declared /// pragmas the handler with a null identifier is invoked, if it exists. /// /// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g. /// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other /// pragmas. class PragmaHandler { std::string Name; public: PragmaHandler() = default; explicit PragmaHandler(StringRef name) : Name(name) {} virtual ~PragmaHandler(); StringRef getName() const { return Name; } virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer, Token &FirstToken) = 0; /// getIfNamespace - If this is a namespace, return it. This is equivalent to /// using a dynamic_cast, but doesn't require RTTI. virtual PragmaNamespace *getIfNamespace() { return nullptr; } }; /// EmptyPragmaHandler - A pragma handler which takes no action, which can be /// used to ignore particular pragmas. class EmptyPragmaHandler : public PragmaHandler { public: explicit EmptyPragmaHandler(StringRef Name = StringRef()); void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer, Token &FirstToken) override; }; /// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas, /// allowing hierarchical pragmas to be defined. Common examples of namespaces /// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces /// may be (potentially recursively) defined. class PragmaNamespace : public PragmaHandler { /// Handlers - This is a map of the handlers in this namespace with their name /// as key. llvm::StringMap<PragmaHandler *> Handlers; public: explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {} ~PragmaNamespace() override; /// FindHandler - Check to see if there is already a handler for the /// specified name. If not, return the handler for the null name if it /// exists, otherwise return null. If IgnoreNull is true (the default) then /// the null handler isn't returned on failure to match. PragmaHandler *FindHandler(StringRef Name, bool IgnoreNull = true) const; /// AddPragma - Add a pragma to this namespace.
void AddPragma(PragmaHandler *Handler); /// RemovePragmaHandler - Remove the given handler from the /// namespace. void RemovePragmaHandler(PragmaHandler *Handler); bool IsEmpty() const { return Handlers.empty(); } void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer, Token &Tok) override; PragmaNamespace *getIfNamespace() override { return this; } }; } // namespace clang #endif // LLVM_CLANG_LEX_PRAGMA_H
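// Illustrative sketch (not part of the original header): a minimal handler
// for a hypothetical "#pragma example ...", built only on the interfaces
// declared above plus Preprocessor::AddPragmaHandler().  The class name and
// pragma spelling are invented for illustration.
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"

namespace {
class ExamplePragmaHandler : public clang::PragmaHandler {
public:
  ExamplePragmaHandler() : clang::PragmaHandler("example") {}

  void HandlePragma(clang::Preprocessor &PP,
                    clang::PragmaIntroducerKind Introducer,
                    clang::Token &FirstToken) override {
    // React to "#pragma example"; a real handler would lex the remaining
    // tokens of the directive here with PP.Lex(...).
  }
};
} // namespace

// Registration, e.g. from a frontend action or plugin:
//   PP.AddPragmaHandler(new ExamplePragmaHandler());  // top-level pragma
// Handlers may also be nested under a PragmaNamespace such as
// "#pragma clang" via the AddPragmaHandler(Namespace, Handler) overload.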
game.h
#pragma once #ifndef GAME_H #define GAME_H #include <mana/core/memoryallocator.h> // #include <chaos/chaos.h> #include <core/dimensions.h> #include <core/entity.h> #include <core/gamestate.h> #include <core/position.h> #include <core/scenery.h> #include <mana/audio/audiomanager.h> #include <mana/core/inputmanager.h> #include <mana/graphics/entities/manifoldplanet.h> #include <mana/graphics/entities/sprite.h> #include <mana/graphics/shaders/fxaashader.h> #include <mana/graphics/shaders/manifolddualcontouringshader.h> #include <mana/graphics/shaders/spriteanimationshader.h> #include <mana/graphics/shaders/spriteshader.h> #include <mana/graphics/utilities/camera.h> #include <mana/graphics/utilities/spriteanimation.h> #include <mana/mana.h> #include "core/jobsystem.h" #include "core/render.h" #include "utilities/playercamera.h" #include "utilities/resourcemanager.h" struct Game { struct Window* window; struct SpriteShader sprite_shader; struct SpriteAnimationShader sprite_animation_shader; struct FXAAShader fxaa_shader; struct PlayerCamera player_camera; struct GameState* game_state; struct ComponentRegistry render_registry; struct JobSystem job_system; struct ResourceManager resource_manager; struct ManifoldDualContouringShader planet_shader; struct ManifoldPlanet planet; }; void game_init(struct Game* game, struct Mana* mana, struct Window* window); void game_delete(struct Game* game, struct Mana* mana); void game_update(struct Game* game, struct Mana* mana, double delta_time); void game_update_camera(struct Game* game, struct Engine* engine); void game_update_input(struct Game* game, struct Engine* engine); static inline void game_check_for_window_resize(struct Game* game, struct GPUAPI* gpu_api) { // When the window is resized everything must be recreated in vulkan if (gpu_api->vulkan_state->reset_shaders) { gpu_api->vulkan_state->reset_shaders = 0; vkDeviceWaitIdle(gpu_api->vulkan_state->device); fxaa_shader_delete(&game->fxaa_shader, gpu_api); fxaa_shader_init(&game->fxaa_shader, gpu_api); game->fxaa_shader.on = 0; sprite_shader_delete(&game->sprite_shader, gpu_api); sprite_shader_init(&game->sprite_shader, gpu_api, 0); sprite_animation_shader_delete(&game->sprite_animation_shader, gpu_api); sprite_animation_shader_init(&game->sprite_animation_shader, gpu_api, 0); /*for (int entity_num = 0; entity_num < array_list_size(&game->stage_entity_render_list); entity_num++) { struct Entity* entity = array_list_get(&game->stage_entity_render_list, entity_num); (*entity->recreate_func)(entity->entity_data, gpu_api); }*/ } } static inline void game_update_jobs(struct Game* game, struct GPUAPI* gpu_api) { // Update sprites /*struct EntityUpdateData* sprites_update_data_pool = malloc(sizeof(struct EntityUpdateData) * array_list_size(&game->stage_entity_render_list)); struct Job* sprites_update_job_pool = malloc(sizeof(struct Job) * array_list_size(&game->stage_entity_render_list)); for (int entity_num = 0; entity_num < array_list_size(&game->stage_entity_render_list); entity_num++) { struct Entity* entity = array_list_get(&game->stage_entity_render_list, entity_num); sprites_update_data_pool[entity_num] = (struct EntityUpdateData){.game_handle = game, .entity_handle = entity, .delta_time = delta_time}; sprites_update_job_pool[entity_num] = (struct Job){.job_func = entity_update_job, .job_data = &sprites_update_data_pool[entity_num]}; job_system_enqueue(game->job_system, &sprites_update_job_pool[entity_num]); } job_system_start_threads(game->job_system); job_system_wait(game->job_system); 
  free(scenery_update_data_pool);
  free(scenery_update_job_pool);
  free(sprites_update_data_pool);
  free(sprites_update_job_pool);*/
}

static inline void game_sort_render_entites(struct Game* game, struct GPUAPI* gpu_api, struct ArrayList* sorted_render_list) {
  char* sorted_render_list_key = NULL;
  struct MapIter sorted_render_list_iter = map_iter();
  // Build list of entities that need to be rendered
  while ((sorted_render_list_key = map_next(&game->render_registry.registry, &sorted_render_list_iter)))
    array_list_add(sorted_render_list, sorted_render_list_key);

  // TODO: Look into multithreaded merge sort
  // Sort render list for draw order (insertion sort on z)
  for (int render_num = 0; render_num < array_list_size(sorted_render_list); render_num++) {
    for (int other_render_num = render_num; other_render_num > 0; other_render_num--) {
      struct Position* position_one = component_registry_get(&game->game_state->position_registry, array_list_get(sorted_render_list, other_render_num));
      struct Position* position_two = component_registry_get(&game->game_state->position_registry, array_list_get(sorted_render_list, other_render_num - 1));
      // TODO: Check within float range
      if (position_one->z > position_two->z)
        continue;
      // TODO: Add if within z range then check y range/implicit function
      //if (entity_one->position.y - entity_one->height / 2.0f < entity_two->position.y - entity_two->height / 2.0f)
      //  continue;
      array_list_swap(sorted_render_list, other_render_num, other_render_num - 1);
    }
  }
}

static inline void game_render_entities(struct Game* game, struct GPUAPI* gpu_api, struct ArrayList* sorted_render_list) {
  for (int render_num = 0; render_num < array_list_size(sorted_render_list); render_num++) {
    char* entity_id = array_list_get(sorted_render_list, render_num);
    struct Render* next_render = component_registry_get(&game->render_registry, entity_id);
    sprite_update_uniforms(&next_render->sprite, gpu_api);
    sprite_render(&next_render->sprite, gpu_api);
  }
}

static inline void game_hotswap_scenery(struct Game* game, struct GPUAPI* gpu_api) {
  // Clear any existing scenery
  if (game->game_state->scenery_registry.registry.num_nodes > 0) {
    char* scenery_key = NULL;
    struct MapIter scenery_iter = map_iter();
    while ((scenery_key = map_next(&game->game_state->scenery_registry.registry, &scenery_iter))) {
      sprite_delete(&((struct Render*)map_get(&game->render_registry.registry, scenery_key))->sprite, gpu_api);
      component_registry_remove(&game->game_state->position_registry, scenery_key);
      component_registry_remove(&game->game_state->dimensions_registry, scenery_key);
      component_registry_remove(&game->render_registry, scenery_key);
      // TODO: Write better way to remove single entity
      for (int entity_num = 0; entity_num < vector_size(&game->game_state->entities); entity_num++) {
        // strcmp() returns 0 on a match, so the equality test must be explicit
        if (strcmp(scenery_key, ((struct Entity*)vector_get(&game->game_state->entities, entity_num))->entity_id) == 0) {
          vector_remove(&game->game_state->entities, entity_num);
          break;
        }
      }
    }
    component_registry_delete(&game->game_state->scenery_registry);
    component_registry_init(&game->game_state->scenery_registry, sizeof(struct Scenery));
  }
  // Load scenery from xml file
  struct XmlNode* game_stage_node = xml_parser_load_xml_file("./assets/stages/gamestage.xml");
  struct XmlNode* scenery_node = xml_node_get_child(game_stage_node, "scenery");

  const char* scenery_list_key = NULL;
  struct MapIter scenery_list_iter = map_iter();
  struct SceneryBucket {
    struct Scenery scenery[64];
    struct XmlNode* node[64];
  } texture_settings_bucket = {0};

  // Find each piece of unique scenery
  int total_scenery_num = 0;
while ((scenery_list_key = map_next(scenery_node->child_nodes, &scenery_list_iter))) texture_settings_bucket.node[total_scenery_num++] = array_list_get(*((struct ArrayList**)map_get(scenery_node->child_nodes, scenery_list_key)), 0); // Build components from xml data //#pragma omp parallel for schedule(dynamic) for (int scenery_num = 0; scenery_num < total_scenery_num; scenery_num++) { struct XmlNode* position_node = xml_node_get_child(texture_settings_bucket.node[scenery_num], "position"); float scale = atof(xml_node_get_data(xml_node_get_child(texture_settings_bucket.node[scenery_num], "scale"))); int repeat_factor = atoi(xml_node_get_data(xml_node_get_child(texture_settings_bucket.node[scenery_num], "repeat"))); float offset = atof(xml_node_get_data(xml_node_get_child(texture_settings_bucket.node[scenery_num], "offset"))); for (int repeat_num = 0; repeat_num < repeat_factor; repeat_num++) { struct Entity new_scenery_entity = {0}; entity_init(&new_scenery_entity); vector_push_back(&game->game_state->entities, &new_scenery_entity); struct Scenery new_scenery = (struct Scenery){.texture_path = xml_node_get_attribute(position_node, "path"), .repeat_factor = repeat_factor, .offset = offset}; component_registry_set(&game->game_state->scenery_registry, &new_scenery_entity, &new_scenery); struct Render new_scenery_render = {0}; sprite_init(&new_scenery_render.sprite, gpu_api, &game->sprite_shader.shader, texture_cache_get(&game->resource_manager.texture_cache, new_scenery.texture_path)); struct Dimensions new_scenery_dimensions = {.direction = 1.0f, .scale = scale, .height = new_scenery_render.sprite.height * scale, .width = new_scenery_render.sprite.width * scale}; component_registry_set(&game->game_state->dimensions_registry, &new_scenery_entity, &new_scenery_dimensions); struct Position new_scenery_position = (struct Position){.x = atof(xml_node_get_data(xml_node_get_child(position_node, "x"))) + (new_scenery_dimensions.width * repeat_num) * new_scenery.offset, .y = atof(xml_node_get_data(xml_node_get_child(position_node, "y"))), .z = atof(xml_node_get_data(xml_node_get_child(position_node, "z")))}; component_registry_set(&game->game_state->position_registry, &new_scenery_entity, &new_scenery_position); new_scenery_render.sprite.position = (vec3){.x = new_scenery_position.x, .y = new_scenery_position.y, .z = new_scenery_position.z}; new_scenery_render.sprite.scale = (vec3){.x = scale, .y = scale, .z = scale}; component_registry_set(&game->render_registry, &new_scenery_entity, &new_scenery_render); } } xml_parser_delete(game_stage_node); } #endif //GAME_H
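// Illustrative sketch (not from the original header): the draw-order sort in
// game_sort_render_entites above compares raw z values; its "check within
// float range" TODO is usually handled with a tolerance so that sprites on
// the same layer are not reordered by floating-point noise.  The epsilon
// value and names here are invented for illustration.
#include <cmath>

static const float DRAW_ORDER_EPSILON = 1.0e-4f;  // assumed layer tolerance

// Returns true when a and b should be treated as the same z layer.
static inline bool z_same_layer(float a, float b) {
  return std::fabs(a - b) < DRAW_ORDER_EPSILON;
}

// In the sort this would gate the swap: only reorder when the z values
// differ by more than the tolerance, otherwise fall back to a secondary
// key such as the y test sketched in the commented-out code above.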
GB_binop__land_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__land_uint32 // A.*B function (eWiseMult): GB_AemultB__land_uint32 // A*D function (colscale): GB_AxD__land_uint32 // D*A function (rowscale): GB_DxB__land_uint32 // C+=B function (dense accum): GB_Cdense_accumB__land_uint32 // C+=b function (dense accum): GB_Cdense_accumb__land_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_uint32 // C=scalar+B GB_bind1st__land_uint32 // C=scalar+B' GB_bind1st_tran__land_uint32 // C=A+scalar GB_bind2nd__land_uint32 // C=A'+scalar GB_bind2nd_tran__land_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = ((x != 0) && (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_UINT32 || GxB_NO_LAND_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__land_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__land_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__land_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__land_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__land_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__land_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__land_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__land_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__land_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__land_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__land_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
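// Illustrative sketch (not part of the generated file): every kernel above
// instantiates one generic pattern -- apply the LAND operator
// z = ((x != 0) && (y != 0)) elementwise, parallelized with OpenMP.  A
// stand-alone version of that pattern with invented names:
#include <cstdint>

void land_uint32_arrays(std::uint32_t* z, const std::uint32_t* x,
                        const std::uint32_t* y, std::int64_t n, int nthreads) {
  std::int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < n; p++) {
    // LAND treats any nonzero input as "true" and always yields 0 or 1.
    z[p] = ((x[p] != 0) && (y[p] != 0));
  }
}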
Parallel_simplicial_ldl.h
//
// Created by kazem on 4/12/19.
//

#ifndef PROJECT_PARALLEL_SIMPLICIAL_LDL_H
#define PROJECT_PARALLEL_SIMPLICIAL_LDL_H

// System headers must be included outside the namespace, otherwise their
// declarations would be wrapped in nasoq:: and fail to link.
#include <omp.h>
#include <cassert>

namespace nasoq{

 int ldl_parallel_left_simplicial_01 (int n, int* c, int* r, double* values,
                                      int* cT, int* rT, int* lC, int* lR, double* &lValues,
                                      double *d,
#if 0
                                      int *prunePtr, int *pruneSet,
#endif
                                      int *eTree,
                                      int nLevels, int *levelPtr,
                                      int nPar, int *parPtr, int *partition) {
  //ws n, ws_int size of 3*n
  /*
   * Performs an LDL' factorization of a given matrix (c, r, values),
   * stored in compressed column format, and produces L and D; L is
   * stored in compressed column format and D in the dense array d.
   * (n, c, r, values) : IN : input matrix
   * (lC, lR) : IN : the column and row sparsity patterns of L
   * (lValues) : OUT : the nonzero values of the L factor
   * (pruneSet, prunePtr) : IN : the row sparsity pattern of the factor L
   */
  double *f;
  int *xi;
  //omp_set_num_threads(1);
  omp_set_nested(1);
  for (int i1 = 0; i1 < nLevels; ++i1) {
#pragma omp parallel private(f, xi)
   {
#pragma omp for schedule(static)
    for (int j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
     f = new double[n]();
     xi = new int[2 * n]();
     //int pls = levelSet[j1];
     for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) {
      int colNo = partition[k1];
      int spCol = 0;
      //Uncompress a col into a 1D array
      for (int nzNo = c[colNo]; nzNo < c[colNo + 1]; ++nzNo) {
       f[r[nzNo]] = values[nzNo];//Copying nonzero of the col
      }
#if 0
      for (int i = prunePtr[colNo]; i < prunePtr[colNo + 1]-1; ++i) {
       spCol = pruneSet[i];
#endif
      // top must be thread-local: declaring it at function scope made it
      // shared across threads, a data race when several columns are
      // factored concurrently.
      int top = ereach(n, cT, rT, colNo, eTree, xi, xi + n);
      //std::cout<<n-top<<";\n";
      for (int i = top; i < n; ++i) {
       spCol = xi[i];
       double facing_val = 0, tmp = 0;
       int facing_idx = -1;
       for (int l = lC[spCol]; l < lC[spCol + 1]; ++l) {
        if (lR[l] == colNo) {
         facing_val = lValues[l];
         tmp = facing_val * d[spCol];
         facing_idx = l;
         break;
        }
       }
       assert(facing_idx >= 0);
       for (int l = facing_idx + 1; l < lC[spCol + 1]; ++l) {
        f[lR[l]] -= lValues[l] * tmp;
       }
       d[colNo] += facing_val * tmp;
      }
      d[colNo] = f[colNo] - d[colNo];
      double diag = d[colNo];
      //double tmpSqrt = sqrt(f[colNo]);
      f[colNo] = 0;
      lValues[lC[colNo]] = 1;
      for (int j = lC[colNo] + 1; j < lC[colNo + 1]; ++j) {
       lValues[j] = f[lR[j]] / diag;
       f[lR[j]] = 0;
      }
     }
     delete[] f;
     delete[] xi;
    }
   }
  }
  return 1;
 }
}
#endif //PROJECT_PARALLEL_SIMPLICIAL_LDL_H
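// Illustrative sketch (not from the original header): the factorization
// above is driven by a three-level schedule -- levels that must run in
// sequence, independent partitions inside a level that may run in parallel,
// and columns inside a partition that run serially.  The bare traversal,
// assuming the same levelPtr/parPtr/partition conventions:
void traverse_level_sets(int nLevels, const int* levelPtr,
                         const int* parPtr, const int* partition) {
  for (int l = 0; l < nLevels; ++l) {                      // levels: sequential
#pragma omp parallel for schedule(static)
    for (int p = levelPtr[l]; p < levelPtr[l + 1]; ++p) {  // partitions: parallel
      for (int k = parPtr[p]; k < parPtr[p + 1]; ++k) {    // columns: serial
        int col = partition[k];
        (void)col;  // the per-column factorization work would go here
      }
    }
  }
}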
3d7pt.c
/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four sizes are required; reading them conditionally left the
   * variables uninitialized (undefined behavior) when arguments were
   * missing. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] =
                alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k]
                      + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k]
                      + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (commented out: freeing was causing performance degradation)
  /*
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */

  return 0;
}
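// Illustrative sketch (not part of the original benchmark): the serial loop
// nest above parallelizes directly over the outermost spatial dimension,
// since every point at time level t+1 depends only on time level t.  A
// hedged sketch of one time step, assuming the same double*** time-buffered
// layout as the benchmark:
void stencil_step_omp(double*** Anext, double*** Acur,
                      int Nz, int Ny, int Nx,
                      double alpha, double beta) {
#pragma omp parallel for schedule(static)
  for (int i = 1; i < Nz - 1; i++) {        // planes are independent within a step
    for (int j = 1; j < Ny - 1; j++) {
      for (int k = 1; k < Nx - 1; k++) {
        Anext[i][j][k] = alpha * Acur[i][j][k]
            + beta * (Acur[i-1][j][k] + Acur[i][j-1][k] + Acur[i][j][k-1]
                    + Acur[i+1][j][k] + Acur[i][j+1][k] + Acur[i][j][k+1]);
      }
    }
  }
  // Note: the time loop itself must remain sequential; only the spatial
  // loops of a single step are safe to distribute.
}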
nr_ao2mo.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> //#define NDEBUG //#include <omp.h> #include "config.h" #include "cint.h" #include "np_helper/np_helper.h" #include "vhf/cvhf.h" #include "vhf/fblas.h" #include "vhf/nr_direct.h" #include "nr_ao2mo.h" #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) // 9f or 7g or 5h functions should be enough #define NCTRMAX 64 #define OUTPUTIJ 1 #define INPUT_IJ 2 /* * Denoting 2e integrals (ij|kl), * AO2MOnr_e1_drv transforms ij for ksh_start <= k shell < ksh_end. * The transformation C_pi C_qj (pq|k*) coefficients are stored in * mo_coeff, C_pi and C_qj are offset by i_start and i_count, j_start and j_count. * The output eri is an 2D array, ordered as (kl-AO-pair,ij-MO-pair) in * C-order. Transposing is needed before calling AO2MOnr_e2_drv. * * AO2MOnr_e2_drv transforms kl for nijcount of ij pairs. * vin is assumed to be an C-array of (ij-MO-pair, kl-AO-pair) * vout is an C-array of (ij-MO-pair, kl-MO-pair) * * ftranse1 and ftranse2 * --------------------- * AO2MOtranse1_nr_s4, AO2MOtranse1_nr_s2ij, AO2MOtranse1_nr_s2kl AO2MOtranse1_nr_s1 * AO2MOtranse2_nr_s4, AO2MOtranse2_nr_s2ij, AO2MOtranse2_nr_s2kl AO2MOtranse2_nr_s1 * Labels s4, s2, s1 are used to label the AO integral symmetry. The * symmetry of transformed integrals are controled by function fmmm * * fmmm * ---- * fmmm dim requirements: * | vout | eri * ---------------------+-------------------------------+------------------- * AO2MOmmm_nr_s2_s2 | [:,bra_count*(bra_count+1)/2] | [:,nao*(nao+1)/2] * | and bra_count==ket_count | * AO2MOmmm_nr_s2_iltj | [:,bra_count*ket_count] | [:,nao*nao] * AO2MOmmm_nr_s2_igtj | [:,bra_count*ket_count] | [:,nao*nao] * AO2MOmmm_nr_s1_iltj | [:,bra_count*ket_count] | [:,nao*nao] * AO2MOmmm_nr_s1_igtj | [:,bra_count*ket_count] | [:,nao*nao] * * AO2MOmmm_nr_s1_iltj, AO2MOmmm_nr_s1_igtj, AO2MOmmm_nr_s2_s2, * AO2MOmmm_nr_s2_iltj, AO2MOmmm_nr_s2_igtj * Pick a proper function from the 5 kinds of AO2MO transformation. * 1. AO integral I_ij != I_ji, use * AO2MOmmm_nr_s1_iltj or AO2MOmmm_nr_s1_igtj * 2. AO integral I_ij == I_ji, but the MO coefficients for bra and ket * are different, use * AO2MOmmm_nr_s2_iltj or AO2MOmmm_nr_s2_igtj * 3. 
AO integral I_ij == I_ji, and the MO coefficients are the same for * bra and ket, use * AO2MOmmm_nr_s2_s2 * * ftrans | allowed fmmm * ----------------------+--------------------- * AO2MOtranse1_nr_s4 | AO2MOmmm_nr_s2_s2 * AO2MOtranse1_nr_s2ij | AO2MOmmm_nr_s2_iltj * AO2MOtranse2_nr_s4 | AO2MOmmm_nr_s2_igtj * AO2MOtranse2_nr_s2kl | * ----------------------+--------------------- * AO2MOtranse1_nr_s2kl | AO2MOmmm_nr_s2_s2 * AO2MOtranse2_nr_s2ij | AO2MOmmm_nr_s2_igtj * | AO2MOmmm_nr_s2_iltj * ----------------------+--------------------- * AO2MOtranse1_nr_s1 | AO2MOmmm_nr_s1_iltj * AO2MOtranse2_nr_s1 | AO2MOmmm_nr_s1_igtj * */ /* for m > n * calculate the upper triangle part (of Fortran order matrix) * _ |------- n -------| _ * diag_off [ . . . . . . . . ] | * _ [ . . . . . . . . ] m * [ . . . . . . . ] | * [ . . . . . . ] _ */ void AO2MOdtriumm_o1(int m, int n, int k, int diag_off, double *a, double *b, double *c) { const double D0 = 0; const double D1 = 1; const char TRANS_N = 'N'; const char TRANS_T = 'T'; const int BLK = 48; int mstart = m - MAX(0, (m-diag_off)/BLK)*BLK; int nstart = mstart - diag_off; int nleft; dgemm_(&TRANS_T, &TRANS_N, &mstart, &n, &k, &D1, a, &k, b, &k, &D0, c, &m); for (; mstart < m; mstart+=BLK, nstart+=BLK) { nleft = n - nstart; dgemm_(&TRANS_T, &TRANS_N, &BLK, &nleft, &k, &D1, a+mstart*k, &k, b+nstart*k, &k, &D0, c+nstart*m+mstart, &m); } } /* for m < n * calculate the upper triangle part (of Fortran order matrix) * _ |------- n -------| _ * diag_off [ . . . . . . . . ] | * _ [ . . . . . . . . ] m * [ . . . . . . . ] | * [ . . . . . . ] _ */ void AO2MOdtriumm_o2(int m, int n, int k, int diag_off, double *a, double *b, double *c) { const double D0 = 0; const double D1 = 1; const char TRANS_N = 'N'; const char TRANS_T = 'T'; const int BLK = 48; int nstart, nleft; int mend = diag_off; for (nstart = 0; nstart < m-diag_off-BLK; nstart+=BLK) { mend += BLK; dgemm_(&TRANS_T, &TRANS_N, &mend, &BLK, &k, &D1, a, &k, b+nstart*k, &k, &D0, c+nstart*m, &m); } nleft = n - nstart; dgemm_(&TRANS_T, &TRANS_N, &m, &nleft, &k, &D1, a, &k, b+nstart*k, &k, &D0, c+nstart*m, &m); } /* * s1-AO integrals to s1-MO integrals, efficient for i_count < j_count * shape requirements: * vout[:,bra_count*ket_count], eri[:,nao*nao] * s1, s2 here to label the AO symmetry */ int AO2MOmmm_nr_s1_iltj(double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->bra_count * envs->ket_count; case INPUT_IJ: return envs->nao * envs->nao; } const double D0 = 0; const double D1 = 1; const char TRANS_T = 'T'; const char TRANS_N = 'N'; int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; int j_start = envs->ket_start; int j_count = envs->ket_count; double *mo_coeff = envs->mo_coeff; // C_pi (pq| = (iq|, where (pq| is in C-order dgemm_(&TRANS_N, &TRANS_N, &nao, &i_count, &nao, &D1, eri, &nao, mo_coeff+i_start*nao, &nao, &D0, buf, &nao); dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &nao, &D1, mo_coeff+j_start*nao, &nao, buf, &nao, &D0, vout, &j_count); return 0; } /* * s1-AO integrals to s1-MO integrals, efficient for i_count > j_count * shape requirements: * vout[:,bra_count*ket_count], eri[:,nao*nao] */ int AO2MOmmm_nr_s1_igtj(double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->bra_count * envs->ket_count; case INPUT_IJ: return envs->nao * envs->nao; } const double D0 = 0; const double D1 = 1; const char TRANS_T = 'T'; const char TRANS_N = 'N'; 
int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; int j_start = envs->ket_start; int j_count = envs->ket_count; double *mo_coeff = envs->mo_coeff; // C_qj (pq| = (pj|, where (pq| is in C-order dgemm_(&TRANS_T, &TRANS_N, &j_count, &nao, &nao, &D1, mo_coeff+j_start*nao, &nao, eri, &nao, &D0, buf, &j_count); dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &nao, &D1, buf, &j_count, mo_coeff+i_start*nao, &nao, &D0, vout, &j_count); return 0; } /* * s2-AO integrals to s2-MO integrals * shape requirements: * vout[:,bra_count*(bra_count+1)/2] and bra_count==ket_count, * eri[:,nao*(nao+1)/2] * first s2 is the AO symmetry, second s2 is the MO symmetry */ int AO2MOmmm_nr_s2_s2(double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: assert(envs->bra_count == envs->ket_count); return envs->bra_count * (envs->bra_count+1) / 2; case INPUT_IJ: return envs->nao * (envs->nao+1) / 2; } const double D0 = 0; const double D1 = 1; const char SIDE_L = 'L'; const char UPLO_U = 'U'; int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; int j_start = envs->ket_start; int j_count = envs->ket_count; double *mo_coeff = envs->mo_coeff; double *buf1 = buf + nao*i_count; int i, j, ij; // C_pi (pq| = (iq|, where (pq| is in C-order dsymm_(&SIDE_L, &UPLO_U, &nao, &i_count, &D1, eri, &nao, mo_coeff+i_start*nao, &nao, &D0, buf, &nao); AO2MOdtriumm_o1(j_count, i_count, nao, 0, mo_coeff+j_start*nao, buf, buf1); for (i = 0, ij = 0; i < i_count; i++) { for (j = 0; j <= i; j++, ij++) { vout[ij] = buf1[j]; } buf1 += j_count; } return 0; } /* * s2-AO integrals to s1-MO integrals, efficient for i_count < j_count * shape requirements: * vout[:,bra_count*ket_count], eri[:,nao*(nao+1)/2] */ int AO2MOmmm_nr_s2_iltj(double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->bra_count * envs->ket_count; case INPUT_IJ: return envs->nao * (envs->nao+1) / 2; } const double D0 = 0; const double D1 = 1; const char SIDE_L = 'L'; const char UPLO_U = 'U'; const char TRANS_T = 'T'; const char TRANS_N = 'N'; int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; int j_start = envs->ket_start; int j_count = envs->ket_count; double *mo_coeff = envs->mo_coeff; // C_pi (pq| = (iq|, where (pq| is in C-order dsymm_(&SIDE_L, &UPLO_U, &nao, &i_count, &D1, eri, &nao, mo_coeff+i_start*nao, &nao, &D0, buf, &nao); // C_qj (iq| = (ij| dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &nao, &D1, mo_coeff+j_start*nao, &nao, buf, &nao, &D0, vout, &j_count); return 0; } /* * s2-AO integrals to s1-MO integrals, efficient for i_count > j_count * shape requirements: * vout[:,bra_count*ket_count], eri[:,nao*(nao+1)/2] */ int AO2MOmmm_nr_s2_igtj(double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->bra_count * envs->ket_count; case INPUT_IJ: return envs->nao * (envs->nao+1) / 2; } const double D0 = 0; const double D1 = 1; const char SIDE_L = 'L'; const char UPLO_U = 'U'; const char TRANS_T = 'T'; const char TRANS_N = 'N'; int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; int j_start = envs->ket_start; int j_count = envs->ket_count; double *mo_coeff = envs->mo_coeff; // C_qj (pq| = (pj|, where (pq| is in C-order dsymm_(&SIDE_L, &UPLO_U, &nao, &j_count, &D1, eri, &nao, mo_coeff+j_start*nao, &nao, &D0, buf, &nao); // C_pi (pj| = (ij| dgemm_(&TRANS_T, &TRANS_N, 
&j_count, &i_count, &nao, &D1, buf, &nao, mo_coeff+i_start*nao, &nao, &D0, vout, &j_count); return 0; } /* * transform bra, s1 to label AO symmetry */ int AO2MOmmm_bra_nr_s1(double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case 1: return envs->bra_count * envs->nao; case 2: return envs->nao * envs->nao; } const double D0 = 0; const double D1 = 1; const char TRANS_N = 'N'; int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; double *mo_coeff = envs->mo_coeff; dgemm_(&TRANS_N, &TRANS_N, &nao, &i_count, &nao, &D1, vin, &nao, mo_coeff+i_start*nao, &nao, &D0, vout, &nao); return 0; } /* * transform ket, s1 to label AO symmetry */ int AO2MOmmm_ket_nr_s1(double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->nao * envs->ket_count; case INPUT_IJ: return envs->nao * envs->nao; } const double D0 = 0; const double D1 = 1; const char TRANS_T = 'T'; const char TRANS_N = 'N'; int nao = envs->nao; int j_start = envs->ket_start; int j_count = envs->ket_count; double *mo_coeff = envs->mo_coeff; dgemm_(&TRANS_T, &TRANS_N, &j_count, &nao, &nao, &D1, mo_coeff+j_start*nao, &nao, vin, &nao, &D0, vout, &j_count); return 0; } /* * transform bra, s2 to label AO symmetry */ int AO2MOmmm_bra_nr_s2(double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->bra_count * envs->nao; case INPUT_IJ: return envs->nao * (envs->nao+1) / 2; } const double D0 = 0; const double D1 = 1; const char SIDE_L = 'L'; const char UPLO_U = 'U'; int nao = envs->nao; int i_start = envs->bra_start; int i_count = envs->bra_count; double *mo_coeff = envs->mo_coeff; dsymm_(&SIDE_L, &UPLO_U, &nao, &i_count, &D1, vin, &nao, mo_coeff+i_start*nao, &nao, &D0, vout, &nao); return 0; } /* * transform ket, s2 to label AO symmetry */ int AO2MOmmm_ket_nr_s2(double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs, int seekdim) { switch (seekdim) { case OUTPUTIJ: return envs->nao * envs->ket_count; case INPUT_IJ: return envs->nao * (envs->nao+1) / 2; } const double D0 = 0; const double D1 = 1; const char SIDE_L = 'L'; const char UPLO_U = 'U'; int nao = envs->nao; int j_start = envs->ket_start; int j_count = envs->ket_count; double *mo_coeff = envs->mo_coeff; int i, j; dsymm_(&SIDE_L, &UPLO_U, &nao, &j_count, &D1, vin, &nao, mo_coeff+j_start*nao, &nao, &D0, buf, &nao); for (j = 0; j < nao; j++) { for (i = 0; i < j_count; i++) { vout[i] = buf[i*nao+j]; } vout += j_count; } return 0; } /* * s1, s2ij, s2kl, s4 here to label the AO symmetry * eris[ncomp,nkl,nao_pair_ij] */ static void s4_copy(double *eri, double *ints, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *pints, *peri, *peri1; switch (di) { case 1: for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { pints = ints + di * dj * (l*dk+k); for (j = 0; j < dj; j++) { eri[j] = pints[j]; } eri += nao2; } } break; case 2: for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { pints = ints + di * dj * (l*dk+k); peri = eri + istride; for (j = 0; j < dj;j++) { eri [j] = pints[j*2+0]; peri[j] = pints[j*2+1]; } eri += nao2; } } break; case 3: for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { pints = ints + di * dj * (l*dk+k); peri = eri + istride; peri1 = peri + istride + 1; for (j = 0; j < dj;j++) { eri [j] = pints[j*3+0]; peri [j] = pints[j*3+1]; peri1[j] = pints[j*3+2]; } eri += nao2; } } break; default: for (k = 0; k < dk; k++) { for (l = 0; l < 
dl; l++) { pints = ints + di * dj * (l*dk+k); peri = eri; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { //TODO: call nontemporal write to avoid write-allocate peri[j] = pints[j*di+i]; } peri += istride + i; } eri += nao2; } } } } static void s4_set0(double *eri, double *nop, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *peri, *peri1; switch (di) { case 1: for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { for (j = 0; j < dj; j++) { eri[j] = 0; } eri += nao2; } } break; case 2: for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { peri = eri + istride; for (j = 0; j < dj; j++) { eri [j] = 0; peri[j] = 0; } eri += nao2; } } break; case 3: for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { peri = eri + istride; peri1 = peri + istride + 1; for (j = 0; j < dj; j++) { eri [j] = 0; peri [j] = 0; peri1[j] = 0; } eri += nao2; } } break; default: for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { peri = eri; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { //TODO: call nontemporal write to avoid write-allocate peri[j] = 0; } peri += istride + i; } eri += nao2; } } } } static void s4_copy_keql(double *eri, double *ints, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *pints, *peri; for (k = 0; k < dk; k++) { for (l = 0; l <= k; l++) { pints = ints + di * dj * (l*dk+k); peri = eri; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri[j] = pints[j*di+i]; } peri += istride + i; } eri += nao2; } } } static void s4_set0_keql(double *eri, double *nop, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *peri; for (k = 0; k < dk; k++) { for (l = 0; l <= k; l++) { peri = eri; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri[j] = 0; } peri += istride + i; } eri += nao2; } } } static void s4_copy_ieqj(double *eri, double *ints, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *pints, *peri; for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { pints = ints + di * dj * (l*dk+k); peri = eri; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { peri[j] = pints[j*di+i]; } peri += istride + i; } eri += nao2; } } } static void s4_set0_ieqj(double *eri, double *nop, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *peri; for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { peri = eri; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { peri[j] = 0; } peri += istride + i; } eri += nao2; } } } static void s4_copy_keql_ieqj(double *eri, double *ints, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *pints, *peri; for (k = 0; k < dk; k++) { for (l = 0; l <= k; l++) { pints = ints + di * dj * (l*dk+k); peri = eri; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { peri[j] = pints[j*di+i]; } peri += istride + i; } eri += nao2; } } } static void s4_set0_keql_ieqj(double *eri, double *nop, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *peri; for (k = 0; k < dk; k++) { for (l = 0; l <= k; l++) { peri = eri; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { peri[j] = 0; } peri += istride + i; } eri += nao2; } } } static void s2kl_copy_keql(double *eri, double *ints, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *pints; for (k = 0; k < dk; k++) { for (l = 0; l <= k; l++) { pints = ints + di * dj * (l*dk+k); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { eri[i*istride+j] = pints[j*di+i]; } } eri += 
nao2; } } } static void s2kl_set0_keql(double *eri, double *nop, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; for (k = 0; k < dk; k++) { for (l = 0; l <= k; l++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { eri[i*istride+j] = 0; } } eri += nao2; } } } static void s1_copy(double *eri, double *ints, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; double *pints; for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { pints = ints + di * dj * (l*dk+k); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { eri[i*istride+j] = pints[j*di+i]; } } eri += nao2; } } } static void s1_set0(double *eri, double *nop, int di, int dj, int dk, int dl, int istride, size_t nao2) { int i, j, k, l; for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { eri[i*istride+j] = 0; } } eri += nao2; } } } #define DISTR_INTS_BY(fcopy, fset0, istride) \ if ((*fprescreen)(shls, envs->vhfopt, envs->atm, envs->bas, envs->env) && \ (*intor)(buf, NULL, shls, envs->atm, envs->natm, \ envs->bas, envs->nbas, envs->env, envs->cintopt, NULL)) { \ pbuf = buf; \ for (icomp = 0; icomp < envs->ncomp; icomp++) { \ peri = eri + nao2 * nkl * icomp + ioff + ao_loc[jsh]; \ fcopy(peri, pbuf, di, dj, dk, dl, istride, nao2); \ pbuf += di * dj * dk * dl; \ } \ } else { \ for (icomp = 0; icomp < envs->ncomp; icomp++) { \ peri = eri + nao2 * nkl * icomp + ioff + ao_loc[jsh]; \ fset0(peri, pbuf, di, dj, dk, dl, istride, nao2); \ } \ } void AO2MOfill_nr_s1(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int nkl, int ish, struct _AO2MOEnvs *envs) { const int nao = envs->nao; const size_t nao2 = nao * nao; const int *ao_loc = envs->ao_loc; const int klsh_start = envs->klsh_start; const int klsh_end = klsh_start + envs->klsh_count; const int di = ao_loc[ish+1] - ao_loc[ish]; const int ioff = ao_loc[ish] * nao; int kl, jsh, ksh, lsh, dj, dk, dl; int icomp; int shls[4]; double *pbuf, *peri; shls[0] = ish; for (kl = klsh_start; kl < klsh_end; kl++) { // kl = k * (k+1) / 2 + l ksh = kl / envs->nbas; lsh = kl - ksh * envs->nbas; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; shls[2] = ksh; shls[3] = lsh; for (jsh = 0; jsh < envs->nbas; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[1] = jsh; DISTR_INTS_BY(s1_copy, s1_set0, nao); } eri += nao2 * dk * dl; } } void AO2MOfill_nr_s2ij(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int nkl, int ish, struct _AO2MOEnvs *envs) { const int nao = envs->nao; const size_t nao2 = nao * (nao+1) / 2; const int *ao_loc = envs->ao_loc; const int klsh_start = envs->klsh_start; const int klsh_end = klsh_start + envs->klsh_count; const int di = ao_loc[ish+1] - ao_loc[ish]; const int ioff = ao_loc[ish] * (ao_loc[ish]+1) / 2; int kl, jsh, ksh, lsh, dj, dk, dl; int icomp; int shls[4]; double *pbuf = buf; double *peri; shls[0] = ish; for (kl = klsh_start; kl < klsh_end; kl++) { // kl = k * (k+1) / 2 + l ksh = kl / envs->nbas; lsh = kl - ksh * envs->nbas; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; shls[2] = ksh; shls[3] = lsh; for (jsh = 0; jsh < ish; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[1] = jsh; DISTR_INTS_BY(s4_copy, s4_set0, ao_loc[ish]+1); } jsh = ish; dj = di; shls[1] = jsh; DISTR_INTS_BY(s4_copy_ieqj, s4_set0_ieqj, ao_loc[ish]+1); eri += nao2 * dk * dl; } } void AO2MOfill_nr_s2kl(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int nkl, int ish, struct _AO2MOEnvs *envs) { const int nao = envs->nao; const 
size_t nao2 = nao * nao; const int *ao_loc = envs->ao_loc; const int klsh_start = envs->klsh_start; const int klsh_end = klsh_start + envs->klsh_count; const int di = ao_loc[ish+1] - ao_loc[ish]; const int ioff = ao_loc[ish] * nao; int kl, jsh, ksh, lsh, dj, dk, dl; int icomp; int shls[4]; double *pbuf = buf; double *peri; shls[0] = ish; for (kl = klsh_start; kl < klsh_end; kl++) { // kl = k * (k+1) / 2 + l ksh = (int)(sqrt(2*kl+.25) - .5 + 1e-7); lsh = kl - ksh * (ksh+1) / 2; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; shls[2] = ksh; shls[3] = lsh; if (ksh == lsh) { for (jsh = 0; jsh < envs->nbas; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[1] = jsh; DISTR_INTS_BY(s2kl_copy_keql, s2kl_set0_keql, nao); } eri += nao2 * dk*(dk+1)/2; } else { for (jsh = 0; jsh < envs->nbas; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[1] = jsh; DISTR_INTS_BY(s1_copy, s1_set0, nao); } eri += nao2 * dk * dl; } } } void AO2MOfill_nr_s4(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int nkl, int ish, struct _AO2MOEnvs *envs) { const int nao = envs->nao; const size_t nao2 = nao * (nao+1) / 2; const int *ao_loc = envs->ao_loc; const int klsh_start = envs->klsh_start; const int klsh_end = klsh_start + envs->klsh_count; const int di = ao_loc[ish+1] - ao_loc[ish]; const int ioff = ao_loc[ish] * (ao_loc[ish]+1) / 2; int kl, jsh, ksh, lsh, dj, dk, dl; int icomp; int shls[4]; double *pbuf = buf; double *peri; shls[0] = ish; for (kl = klsh_start; kl < klsh_end; kl++) { // kl = k * (k+1) / 2 + l ksh = (int)(sqrt(2*kl+.25) - .5 + 1e-7); lsh = kl - ksh * (ksh+1) / 2; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; shls[2] = ksh; shls[3] = lsh; if (ksh == lsh) { for (jsh = 0; jsh < ish; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[1] = jsh; DISTR_INTS_BY(s4_copy_keql, s4_set0_keql, ao_loc[ish]+1); } jsh = ish; dj = di; shls[1] = ish; DISTR_INTS_BY(s4_copy_keql_ieqj, s4_set0_keql_ieqj, ao_loc[ish]+1); eri += nao2 * dk*(dk+1)/2; } else { for (jsh = 0; jsh < ish; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[1] = jsh; DISTR_INTS_BY(s4_copy, s4_set0, ao_loc[ish]+1); } jsh = ish; dj = di; shls[1] = ish; DISTR_INTS_BY(s4_copy_ieqj, s4_set0_ieqj, ao_loc[ish]+1); eri += nao2 * dk * dl; } } } /* * ************************************************ * s1, s2ij, s2kl, s4 here to label the AO symmetry */ void AO2MOtranse1_nr_s1(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ); size_t nao2 = envs->nao * envs->nao; (*fmmm)(vout+ij_pair*row_id, vin+nao2*row_id, buf, envs, 0); } void AO2MOtranse1_nr_s2ij(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { int nao = envs->nao; size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ); size_t nao2 = nao*(nao+1)/2; NPdunpack_tril(nao, vin+nao2*row_id, buf, 0); (*fmmm)(vout+ij_pair*row_id, buf, buf+nao*nao, envs, 0); } void AO2MOtranse1_nr_s2(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse1_nr_s2ij(fmmm, row_id, vout, vin, buf, envs); } void AO2MOtranse1_nr_s2kl(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse1_nr_s1(fmmm, row_id, vout, vin, buf, envs); } void AO2MOtranse1_nr_s4(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse1_nr_s2ij(fmmm, row_id, vout, vin, buf, envs); } /* * 
************************************************ * s1, s2ij, s2kl, s4 here to label the AO symmetry */ void AO2MOtranse2_nr_s1(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ); size_t nao2 = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ); (*fmmm)(vout+ij_pair*row_id, vin+nao2*row_id, buf, envs, 0); } void AO2MOtranse2_nr_s2ij(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s1(fmmm, row_id, vout, vin, buf, envs); } void AO2MOtranse2_nr_s2kl(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { int nao = envs->nao; size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ); size_t nao2 = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ); NPdunpack_tril(nao, vin+nao2*row_id, buf, 0); (*fmmm)(vout+ij_pair*row_id, buf, buf+nao*nao, envs, 0); } void AO2MOtranse2_nr_s2(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs); } void AO2MOtranse2_nr_s4(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs); } /* * ************************************************ * sort (shell-based) integral blocks then transform */ void AO2MOsortranse2_nr_s1(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { int nao = envs->nao; int *ao_loc = envs->ao_loc; size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ); size_t nao2 = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ); int ish, jsh, di, dj; int i, j, ij; double *pbuf; vin += nao2 * row_id; ij = 0; for (ish = 0; ish < envs->nbas; ish++) { di = ao_loc[ish+1] - ao_loc[ish]; for (jsh = 0; jsh < envs->nbas; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++, ij++) { pbuf[i*nao+j] = vin[ij]; } } } } (*fmmm)(vout+ij_pair*row_id, buf, buf+nao*nao, envs, 0); } void AO2MOsortranse2_nr_s2ij(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOsortranse2_nr_s1(fmmm, row_id, vout, vin, buf, envs); } void AO2MOsortranse2_nr_s2kl(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { int nao = envs->nao; int *ao_loc = envs->ao_loc; size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ); size_t nao2 = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ); int ish, jsh, di, dj; int i, j, ij; double *pbuf; vin += nao2 * row_id; for (ish = 0; ish < envs->nbas; ish++) { di = ao_loc[ish+1] - ao_loc[ish]; for (jsh = 0; jsh < ish; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pbuf[i*nao+j] = vin[i*dj+j]; } } vin += di * dj; } // lower triangle block when ish == jsh pbuf = buf + ao_loc[ish] * nao + ao_loc[ish]; for (ij = 0, i = 0; i < di; i++) { for (j = 0; j <= i; j++, ij++) { pbuf[i*nao+j] = vin[ij]; } } vin += di * (di+1) / 2; } (*fmmm)(vout+ij_pair*row_id, buf, buf+nao*nao, envs, 0); } void AO2MOsortranse2_nr_s2(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { AO2MOsortranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs); } void AO2MOsortranse2_nr_s4(int (*fmmm)(), int row_id, double *vout, double *vin, double *buf, struct _AO2MOEnvs *envs) { 
AO2MOsortranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs); } /* * ************************************************ * combine ftrans and fmmm */ void AO2MOtrans_nr_s1_iltj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s1(AO2MOmmm_nr_s1_iltj, row_id, vout, eri, buf, envs); } void AO2MOtrans_nr_s1_igtj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s1(AO2MOmmm_nr_s1_igtj, row_id, vout, eri, buf, envs); } void AO2MOtrans_nr_sorts1_iltj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOsortranse2_nr_s1(AO2MOmmm_nr_s1_iltj, row_id, vout, eri, buf,envs); } void AO2MOtrans_nr_sorts1_igtj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOsortranse2_nr_s1(AO2MOmmm_nr_s1_igtj, row_id, vout, eri, buf,envs); } void AO2MOtrans_nr_s2_iltj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s2kl(AO2MOmmm_nr_s2_iltj, row_id, vout, eri, buf, envs); } void AO2MOtrans_nr_s2_igtj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s2kl(AO2MOmmm_nr_s2_igtj, row_id, vout, eri, buf, envs); } void AO2MOtrans_nr_s2_s2(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOtranse2_nr_s2kl(AO2MOmmm_nr_s2_s2, row_id, vout, eri, buf, envs); } void AO2MOtrans_nr_sorts2_iltj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOsortranse2_nr_s2kl(AO2MOmmm_nr_s2_iltj, row_id, vout, eri, buf, envs); } void AO2MOtrans_nr_sorts2_igtj(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOsortranse2_nr_s2kl(AO2MOmmm_nr_s2_igtj, row_id, vout, eri, buf, envs); } void AO2MOtrans_nr_sorts2_s2(void *nop, int row_id, double *vout, double *eri, double *buf, struct _AO2MOEnvs *envs) { AO2MOsortranse2_nr_s2kl(AO2MOmmm_nr_s2_s2, row_id, vout, eri, buf,envs); } /* * ************************************************ * Denoting 2e integrals (ij|kl), * transform ij for ksh_start <= k shell < ksh_end. * The transformation C_pi C_qj (pq|k*) coefficients are stored in * mo_coeff, C_pi and C_qj are offset by i_start and i_count, j_start and j_count * * The output eri is an 2D array, ordered as (kl-AO-pair,ij-MO-pair) in * C-order. Transposing is needed before calling AO2MOnr_e2_drv. 
 * eri[ncomp,nkl,mo_i,mo_j]
 */
void AO2MOnr_e1_drv(int (*intor)(), void (*fill)(),
                    void (*ftrans)(), int (*fmmm)(),
                    double *eri, double *mo_coeff,
                    int klsh_start, int klsh_count, int nkl, int ncomp,
                    int *orbs_slice, int *ao_loc,
                    CINTOpt *cintopt, CVHFOpt *vhfopt,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        int nao = ao_loc[nbas];
        double *eri_ao = malloc(sizeof(double) * nao*nao*nkl*ncomp);
        assert(eri_ao);
        AO2MOnr_e1fill_drv(intor, fill, eri_ao,
                           klsh_start, klsh_count, nkl, ncomp, ao_loc,
                           cintopt, vhfopt, atm, natm, bas, nbas, env);
        AO2MOnr_e2_drv(ftrans, fmmm, eri, eri_ao, mo_coeff,
                       nkl*ncomp, nao, orbs_slice, ao_loc, nbas);
        free(eri_ao);
}

void AO2MOnr_e2_drv(void (*ftrans)(), int (*fmmm)(),
                    double *vout, double *vin, double *mo_coeff,
                    int nij, int nao,
                    int *orbs_slice, int *ao_loc, int nbas)
{
        struct _AO2MOEnvs envs;
        envs.bra_start = orbs_slice[0];
        envs.bra_count = orbs_slice[1] - orbs_slice[0];
        envs.ket_start = orbs_slice[2];
        envs.ket_count = orbs_slice[3] - orbs_slice[2];
        envs.nao = nao;
        envs.nbas = nbas;
        envs.ao_loc = ao_loc;
        envs.mo_coeff = mo_coeff;

#pragma omp parallel default(none) \
        shared(ftrans, fmmm, vout, vin, nij, envs, nao, orbs_slice)
{
        int i;
        int i_count = envs.bra_count;
        int j_count = envs.ket_count;
        double *buf = malloc(sizeof(double) * (nao+i_count) * (nao+j_count));
#pragma omp for schedule(dynamic)
        for (i = 0; i < nij; i++) {
                (*ftrans)(fmmm, i, vout, vin, buf, &envs);
        }
        free(buf);
}
}

/*
 * The size of eri is ncomp*nkl*nao*nao; note that the upper triangular
 * part may not be filled.
 */
void AO2MOnr_e1fill_drv(int (*intor)(), void (*fill)(), double *eri,
                        int klsh_start, int klsh_count, int nkl, int ncomp,
                        int *ao_loc, CINTOpt *cintopt, CVHFOpt *vhfopt,
                        int *atm, int natm, int *bas, int nbas, double *env)
{
        int i;
        int nao = ao_loc[nbas];
        int dmax = 0;
        for (i = 0; i < nbas; i++) {
                dmax = MAX(dmax, ao_loc[i+1]-ao_loc[i]);
        }
        struct _AO2MOEnvs envs = {natm, nbas, atm, bas, env, nao,
                                  klsh_start, klsh_count, 0, 0, 0, 0,
                                  ncomp, ao_loc, NULL, cintopt, vhfopt};
        int (*fprescreen)();
        if (vhfopt) {
                fprescreen = vhfopt->fprescreen;
        } else {
                fprescreen = CVHFnoscreen;
        }

#pragma omp parallel default(none) \
        shared(fill, fprescreen, eri, envs, intor, nkl, nbas, dmax, ncomp)
{
        int ish;
        double *buf = malloc(sizeof(double)*dmax*dmax*dmax*dmax*ncomp);
#pragma omp for schedule(dynamic, 1)
        for (ish = 0; ish < nbas; ish++) {
                (*fill)(intor, fprescreen, eri, buf, nkl, ish, &envs);
        }
        free(buf);
}
}
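/*
 * Editor's sketch (not part of the original PySCF source): the s2kl
 * routines above rely on the lower-triangular packed AO-pair layout,
 * pq = p*(p+1)/2 + q for p >= q, which NPdunpack_tril expands into a
 * full square matrix before fmmm runs a plain matrix multiplication.
 * Below is a minimal self-contained illustration of that unpacking for
 * a symmetric matrix; unpack_tril_demo is a hypothetical stand-in, not
 * the library routine.
 */
#include <stdio.h>

static void unpack_tril_demo(int n, const double *tril, double *full)
{
        int p, q, pq = 0;
        for (p = 0; p < n; p++) {
                for (q = 0; q <= p; q++, pq++) {
                        full[p*n+q] = tril[pq];  /* lower triangle */
                        full[q*n+p] = tril[pq];  /* mirrored upper triangle */
                }
        }
}

int main(void)
{
        /* packed lower triangle of the symmetric matrix
         * [[1, 2, 4],
         *  [2, 3, 5],
         *  [4, 5, 6]] */
        double tril[6] = {1, 2, 3, 4, 5, 6};
        double full[9];
        int k;
        unpack_tril_demo(3, tril, full);
        for (k = 0; k < 9; k++) {
                printf("%g%c", full[k], (k % 3 == 2) ? '\n' : ' ');
        }
        return 0;
}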
nmf_pgd.c
/* Generated by Cython 0.29.23 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "language": "c", "name": "gensim.models.nmf_pgd", "sources": [ "gensim/models/nmf_pgd.pyx" ] }, "module_name": "gensim.models.nmf_pgd" } END: Cython Metadata */ #ifndef PY_SSIZE_T_CLEAN #define PY_SSIZE_T_CLEAN #endif /* PY_SSIZE_T_CLEAN */ #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_23" #define CYTHON_HEX_VERSION 0x001D17F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef 
CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # 
define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | 
METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__gensim__models__nmf_pgd #define __PYX_HAVE_API__gensim__models__nmf_pgd /* Early includes */ #include <math.h> #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define 
__Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "gensim/models/nmf_pgd.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":105 * * 
@cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define 
__Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, 
PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || 
PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T 
__Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto 
*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* GCCDiagnostics.proto */ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) #define __Pyx_HAS_GCC_DIAGNOSTIC #endif /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long 
__Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'gensim.models.nmf_pgd' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double, double); /*proto*/ static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double, double); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject 
*__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "gensim.models.nmf_pgd" extern int __pyx_module_is_main_gensim__models__nmf_pgd; int __pyx_module_is_main_gensim__models__nmf_pgd = 0; /* Implementation of 'gensim.models.nmf_pgd' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_h[] = "h"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_WtW[] = "WtW"; static const char __pyx_k_Wtv[] = "Wtv"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_grad[] = "grad"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_kappa[] = "kappa"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_hessian[] = "hessian"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_solve_h[] = "solve_h"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_n_samples[] = "n_samples"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_violation[] = "violation"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_sample_idx[] = "sample_idx"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_permutation[] = "permutation"; static const char __pyx_k_n_components[] = "n_components"; static const char __pyx_k_pyx_checksum[] = 
"__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_projected_grad[] = "projected_grad"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_component_idx_1[] = "component_idx_1"; static const char __pyx_k_component_idx_2[] = "component_idx_2"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_gensim_models_nmf_pgd[] = "gensim.models.nmf_pgd"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_gensim_models_nmf_pgd_pyx[] = "gensim/models/nmf_pgd.pyx"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject 
*__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_WtW; static PyObject *__pyx_n_s_Wtv; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_component_idx_1; static PyObject *__pyx_n_s_component_idx_2; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_gensim_models_nmf_pgd; static PyObject *__pyx_kp_s_gensim_models_nmf_pgd_pyx; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_grad; static PyObject *__pyx_n_s_h; static PyObject *__pyx_n_s_hessian; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_kappa; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_n_components; static PyObject *__pyx_n_s_n_samples; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_permutation; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_projected_grad; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_sample_idx; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static 
PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_solve_h; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_violation; static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, 
PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject 
*__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__15;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_codeobj__20;
static PyObject *__pyx_codeobj__27;
/* Late includes */

/* "gensim/models/nmf_pgd.pyx":12
 * from cython.parallel import prange
 *
 * cdef double fmin(double x, double y) nogil:             # <<<<<<<<<<<<<<
 * return x if x < y else y
 *
 */
static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double __pyx_v_x, double __pyx_v_y) {
  double __pyx_r;
  double __pyx_t_1;

  /* "gensim/models/nmf_pgd.pyx":13
 *
 * cdef double fmin(double x, double y) nogil:
 * return x if x < y else y             # <<<<<<<<<<<<<<
 *
 * cdef double fmax(double x, double y) nogil:
 */
  if (((__pyx_v_x < __pyx_v_y) != 0)) {
    __pyx_t_1 = __pyx_v_x;
  } else {
    __pyx_t_1 = __pyx_v_y;
  }
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "gensim/models/nmf_pgd.pyx":12
 * from cython.parallel import prange
 *
 * cdef double fmin(double x, double y) nogil:             # <<<<<<<<<<<<<<
 * return x if x < y else y
 *
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "gensim/models/nmf_pgd.pyx":15
 * return x if x < y else y
 *
 * cdef double fmax(double x, double y) nogil:             # <<<<<<<<<<<<<<
 * return x if x > y else y
 *
 */
static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double __pyx_v_x, double __pyx_v_y) {
  double __pyx_r;
  double __pyx_t_1;

  /* "gensim/models/nmf_pgd.pyx":16
 *
 * cdef double fmax(double x, double y) nogil:
 * return x if x > y else y             # <<<<<<<<<<<<<<
 *
 * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa):
 */
  if (((__pyx_v_x > __pyx_v_y) != 0)) {
    __pyx_t_1 = __pyx_v_x;
  } else {
    __pyx_t_1 = __pyx_v_y;
  }
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "gensim/models/nmf_pgd.pyx":15
 * return x if x < y else y
 *
 * cdef double fmax(double x, double y) nogil:             # <<<<<<<<<<<<<<
 * return x if x > y else y
 *
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "gensim/models/nmf_pgd.pyx":18
 * return x if x > y else y
 *
 * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa):             # <<<<<<<<<<<<<<
 *
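* [annotation] Below: the generated Python wrapper for solve_h (keyword and
* positional argument parsing plus memoryview coercion), followed by the
* C-level implementation of the projected-gradient update.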
"""Find optimal dense vector representation for current W and r matrices. * */ /* Python wrapper */ static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6gensim_6models_7nmf_pgd_solve_h[] = "solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa)\nFind optimal dense vector representation for current W and r matrices.\n\n Parameters\n ----------\n h : matrix\n Dense representation of documents in current batch.\n Wtv : matrix\n WtW : matrix\n\n Returns\n -------\n float\n Cumulative difference between previous and current h vectors.\n\n "; static PyMethodDef __pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h = {"solve_h", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6gensim_6models_7nmf_pgd_solve_h}; static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_h = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Wtv = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_WtW = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_permutation = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_kappa; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("solve_h (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_h,&__pyx_n_s_Wtv,&__pyx_n_s_WtW,&__pyx_n_s_permutation,&__pyx_n_s_kappa,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_h)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Wtv)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 1); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_WtW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 2); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_permutation)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 3); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kappa)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 4); __PYX_ERR(0, 18, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "solve_h") < 0)) __PYX_ERR(0, 18, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } 
else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_h = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_h.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_Wtv = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Wtv.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_WtW = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_WtW.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_permutation = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_permutation.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_kappa = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_kappa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 18, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 18, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_6gensim_6models_7nmf_pgd_solve_h(__pyx_self, __pyx_v_h, __pyx_v_Wtv, __pyx_v_WtW, __pyx_v_permutation, __pyx_v_kappa); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa) { Py_ssize_t __pyx_v_n_components; CYTHON_UNUSED Py_ssize_t __pyx_v_n_samples; double __pyx_v_violation; double __pyx_v_grad; double __pyx_v_projected_grad; double __pyx_v_hessian; Py_ssize_t __pyx_v_sample_idx; Py_ssize_t __pyx_v_component_idx_1; Py_ssize_t __pyx_v_component_idx_2; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; double __pyx_t_14; PyObject *__pyx_t_15 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("solve_h", 0); /* "gensim/models/nmf_pgd.pyx":35 * """ * * cdef Py_ssize_t n_components = h.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 */ __pyx_v_n_components = (__pyx_v_h.shape[0]); /* "gensim/models/nmf_pgd.pyx":36 * * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] # <<<<<<<<<<<<<< * cdef double violation = 0 * cdef double grad, projected_grad, hessian */ __pyx_v_n_samples = (__pyx_v_h.shape[1]); /* "gensim/models/nmf_pgd.pyx":37 * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 # <<<<<<<<<<<<<< * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 */ __pyx_v_violation = 0.0; /* "gensim/models/nmf_pgd.pyx":39 * cdef double violation = 0 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 # <<<<<<<<<<<<<< * 
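* [annotation] In the OpenMP region generated for prange() below, `violation`
* becomes a reduction(+) variable while the loop indices and temporaries are
* privatized (see the firstprivate/lastprivate clauses in the pragmas).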
cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 */ __pyx_v_sample_idx = 0; /* "gensim/models/nmf_pgd.pyx":40 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t component_idx_2 = 0 * */ __pyx_v_component_idx_1 = 0; /* "gensim/models/nmf_pgd.pyx":41 * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 # <<<<<<<<<<<<<< * * for sample_idx in prange(n_samples, nogil=True): */ __pyx_v_component_idx_2 = 0; /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_n_samples; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_violation) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_component_idx_1) lastprivate(__pyx_v_component_idx_2) lastprivate(__pyx_v_grad) lastprivate(__pyx_v_hessian) lastprivate(__pyx_v_projected_grad) firstprivate(__pyx_v_sample_idx) lastprivate(__pyx_v_sample_idx) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_sample_idx = (Py_ssize_t)(0 + 1 * __pyx_t_2); /* Initialize private variables to invalid values */ __pyx_v_component_idx_1 = ((Py_ssize_t)0xbad0bad0); __pyx_v_component_idx_2 = ((Py_ssize_t)0xbad0bad0); __pyx_v_grad = ((double)__PYX_NAN()); __pyx_v_hessian = ((double)__PYX_NAN()); __pyx_v_projected_grad = ((double)__PYX_NAN()); /* "gensim/models/nmf_pgd.pyx":44 * * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): # <<<<<<<<<<<<<< * component_idx_1 = permutation[component_idx_1] * */ __pyx_t_4 = __pyx_v_n_components; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_component_idx_1 = __pyx_t_6; /* "gensim/models/nmf_pgd.pyx":45 * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] # <<<<<<<<<<<<<< * * grad = -Wtv[component_idx_1, sample_idx] */ __pyx_t_7 = __pyx_v_component_idx_1; __pyx_v_component_idx_1 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_permutation.data) + __pyx_t_7)) ))); /* "gensim/models/nmf_pgd.pyx":47 * component_idx_1 = permutation[component_idx_1] * * grad = -Wtv[component_idx_1, sample_idx] # <<<<<<<<<<<<<< * * for component_idx_2 in range(n_components): */ __pyx_t_7 = __pyx_v_component_idx_1; __pyx_t_8 = __pyx_v_sample_idx; __pyx_v_grad = (-(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Wtv.data + __pyx_t_7 * __pyx_v_Wtv.strides[0]) ) + __pyx_t_8 * __pyx_v_Wtv.strides[1]) )))); /* "gensim/models/nmf_pgd.pyx":49 * grad = -Wtv[component_idx_1, sample_idx] * * for component_idx_2 in range(n_components): # <<<<<<<<<<<<<< * grad += WtW[component_idx_1, component_idx_2] * 
h[component_idx_2, sample_idx] * */ __pyx_t_9 = __pyx_v_n_components; __pyx_t_10 = __pyx_t_9; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_component_idx_2 = __pyx_t_11; /* "gensim/models/nmf_pgd.pyx":50 * * for component_idx_2 in range(n_components): * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] # <<<<<<<<<<<<<< * * hessian = WtW[component_idx_1, component_idx_1] */ __pyx_t_8 = __pyx_v_component_idx_1; __pyx_t_7 = __pyx_v_component_idx_2; __pyx_t_12 = __pyx_v_component_idx_2; __pyx_t_13 = __pyx_v_sample_idx; __pyx_v_grad = (__pyx_v_grad + ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_8 * __pyx_v_WtW.strides[0]) )) + __pyx_t_7)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_12 * __pyx_v_h.strides[0]) )) + __pyx_t_13)) ))))); } /* "gensim/models/nmf_pgd.pyx":52 * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] * * hessian = WtW[component_idx_1, component_idx_1] # <<<<<<<<<<<<<< * * grad = grad * kappa / hessian */ __pyx_t_13 = __pyx_v_component_idx_1; __pyx_t_12 = __pyx_v_component_idx_1; __pyx_v_hessian = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_13 * __pyx_v_WtW.strides[0]) )) + __pyx_t_12)) ))); /* "gensim/models/nmf_pgd.pyx":54 * hessian = WtW[component_idx_1, component_idx_1] * * grad = grad * kappa / hessian # <<<<<<<<<<<<<< * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad */ __pyx_v_grad = ((__pyx_v_grad * __pyx_v_kappa) / __pyx_v_hessian); /* "gensim/models/nmf_pgd.pyx":56 * grad = grad * kappa / hessian * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad # <<<<<<<<<<<<<< * * violation += projected_grad * projected_grad */ __pyx_t_12 = __pyx_v_component_idx_1; __pyx_t_13 = __pyx_v_sample_idx; if ((((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_12 * __pyx_v_h.strides[0]) )) + __pyx_t_13)) ))) == 0.0) != 0)) { __pyx_t_14 = __pyx_f_6gensim_6models_7nmf_pgd_fmin(0.0, __pyx_v_grad); } else { __pyx_t_14 = __pyx_v_grad; } __pyx_v_projected_grad = __pyx_t_14; /* "gensim/models/nmf_pgd.pyx":58 * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad * * violation += projected_grad * projected_grad # <<<<<<<<<<<<<< * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) */ __pyx_v_violation = (__pyx_v_violation + (__pyx_v_projected_grad * __pyx_v_projected_grad)); /* "gensim/models/nmf_pgd.pyx":60 * violation += projected_grad * projected_grad * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) 
# <<<<<<<<<<<<<< * * return sqrt(violation) */ __pyx_t_13 = __pyx_v_component_idx_1; __pyx_t_12 = __pyx_v_sample_idx; __pyx_t_7 = __pyx_v_component_idx_1; __pyx_t_8 = __pyx_v_sample_idx; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_7 * __pyx_v_h.strides[0]) )) + __pyx_t_8)) )) = __pyx_f_6gensim_6models_7nmf_pgd_fmax(((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_13 * __pyx_v_h.strides[0]) )) + __pyx_t_12)) ))) - __pyx_v_grad), 0.); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "gensim/models/nmf_pgd.pyx":62 * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) * * return sqrt(violation) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_15 = PyFloat_FromDouble(sqrt(__pyx_v_violation)); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __pyx_r = __pyx_t_15; __pyx_t_15 = 0; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
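* [annotation] Summary of the loop above: for each component i and sample s,
*   grad = (-Wtv[i, s] + sum_j WtW[i, j] * h[j, s]) * kappa / WtW[i, i]
* is applied as the projected update h[i, s] = fmax(h[i, s] - grad, 0.);
* `violation` accumulates the squared projected gradient (clipped to
* fmin(0, grad) where h[i, s] is already zero), and its square root is
* returned as the convergence measure.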
* */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_15); __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_h, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Wtv, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_WtW, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_permutation, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = 
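/* itemsize is coerced through the __index__ protocol */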
__Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # 
<<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
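/* __pyx_t_6 holds the unpacked `self` when `encode` was a bound method:
   take the two-argument fast call, otherwise the plain one-argument call. */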
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */
  __pyx_t_8 = 0;
  __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
  for (;;) {
    if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
    #else
    __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    #endif
    __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_9;
    __pyx_v_idx = __pyx_t_8;
    __pyx_t_8 = (__pyx_t_8 + 1);
    /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */
    __pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
    if (unlikely(__pyx_t_4)) {
      /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */
      __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
      __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
      __pyx_t_5 = 0; __pyx_t_6 = 0;
      __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_Raise(__pyx_t_10, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __PYX_ERR(1, 153, __pyx_L1_error)
      /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */
    }
    /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */
    (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
    /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d."
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint 
dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); 
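  /* Note: __Pyx_PyUnicode_Equals() signals failure with a negative return
     value, which is why the generated code checks `< 0` below before
     treating the comparison result as a boolean. */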
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; 
__pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif 
self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ 
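  /* The exit paths below follow the usual generated pattern: the error
     label releases any live temporaries and records a traceback entry,
     while the success label hands the owned reference in __pyx_r back to
     the caller. */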
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { 
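  /* As with NumPy arrays, len() reports only the extent of the first
     axis: for a cython.array of shape (3, 4), __len__ returns 3, i.e.
     self._shape[0]. */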
Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
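  /* cython.array deliberately opts out of default pickling: its __cinit__
     allocates a raw data buffer that a generic __reduce__ could not
     reconstruct, so both __reduce_cython__ and __setstate_cython__ below
     raise TypeError unconditionally. */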
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = 
__pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ 
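  /* The tuples built below follow the standard __reduce__ protocol of
     (reconstructor, args[, state]).  0xb068931 is a checksum that Cython
     derives from the Enum type's field layout; __pyx_unpickle_Enum checks
     it at unpickle time so that pickles produced by an incompatible build
     of this module are rejected. */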
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ 
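/*
 * A minimal standalone sketch (not generated code) of the pointer-rounding
 * arithmetic that align_pointer / __pyx_align_pointer performs further
 * below: advance a pointer to the next multiple of `alignment`.  The
 * helper name `illustrative_align_up` is ours, for illustration only.
 */
#include <stdint.h>  /* for uintptr_t; harmless if already included */
static CYTHON_UNUSED void *illustrative_align_up(void *memory, size_t alignment)
{
    uintptr_t p = (uintptr_t)memory;
    size_t offset = (size_t)(p % alignment);  /* distance past the previous boundary */
    if (offset > 0)
        p += alignment - offset;              /* bump up to the next boundary */
    return (void *)p;
}
/* Example: illustrative_align_up((void *)0x1003, 8) yields (void *)0x1008,
   while an already-aligned pointer is returned unchanged. */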
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = 
((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, 
__pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < 
THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & 
PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer 
*)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; 
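/*
 * Annotation: __pyx_t_6/__pyx_t_7 carry out the tuple swap quoted above.
 * Releasing a lock that came from the preallocated pool swaps it into the
 * last in-use slot and decrements __pyx_memoryview_thread_locks_used, so
 * __pyx_memoryview_thread_locks[0..used-1] stays densely packed ("swap
 * with last" O(1) removal); only locks that were never pool slots fall
 * through to the else-clause and are freed with PyThread_free_lock().
 */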
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) 
break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject 
*__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* 
"View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = 
_unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) 
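/*
 * Annotation: "self[index]" in the Cython source re-enters __getitem__
 * above to materialise the destination sub-view; the __Pyx_TypeTest just
 * performed guarantees that result really is a memoryview before it is
 * handed below to setitem_slice_assign_scalar as a
 * struct __pyx_memoryview_obj *.
 */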
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & 
~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto 
__pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, 
dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * 
raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; 
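/*
 * Annotation: the machinery around this point is Cython's lowering of the
 * "finally:" clause guarding slice_assign_scalar.  On the error path the
 * live exception is saved into __pyx_t_7..__pyx_t_12, the cleanup body
 * (PyMem_Free(tmp)) still runs, and the exception state together with the
 * recorded __pyx_lineno/__pyx_clineno/__pyx_filename is restored before
 * jumping to __pyx_L1_error, so the reported traceback keeps the original
 * failure location.
 */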
__pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
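/*
 * Annotation: setitem_indexed above is the scalar-assignment path.  The
 * get_item_pointer call resolves the index tuple to an element address one
 * dimension at a time via __pyx_pybuffer_index; for a plain strided buffer
 * that arithmetic reduces to the following sketch (illustrative only --
 * the real helper also handles negative indices, bounds checking and
 * PIL-style suboffsets, which this sketch omits):
 *
 *   char *itemp = (char *)view->buf;
 *   for (int dim = 0; dim < view->ndim; dim++)
 *       itemp += idx[dim] * view->strides[dim];
 *
 * assign_item_from_object then packs the Python value into that slot.
 */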
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else 
#endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) 
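/*
 * Editorial note (not part of the Cython output): the ErrFetch /
 * GivenExceptionMatches / ErrRestore sequence above implements the
 * `except struct.error:` test without normalizing the exception; only
 * once it matches does __Pyx_GetException move the normalized
 * (type, value, traceback) triple into __pyx_t_9/__pyx_t_5/__pyx_t_1 so
 * the handler can raise ValueError in its place.
 */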
__Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): 
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); 
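/*
 * Editorial note (not part of the Cython output): PyTuple_SET_ITEM
 * steals a reference, so each argument is __Pyx_GIVEREF'ed (a no-op
 * outside refnanny debug builds) before being stored into the freshly
 * allocated argument tuple for struct.pack().
 */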
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * 
* if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable 
memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static 
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":560
 *     @property
 *     def base(self):
 *         return self.obj             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->obj);
  __pyx_r = __pyx_v_self->obj;
  goto __pyx_L0;

  /* "View.MemoryView":559
 * 
 *     @property
 *     def base(self):             # <<<<<<<<<<<<<<
 *         return self.obj
 * 
 */

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":563
 * 
 *     @property
 *     def shape(self):             # <<<<<<<<<<<<<<
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])
 * 
 */

/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_length;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":564
 *     @property
 *     def shape(self):
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])             # <<<<<<<<<<<<<<
 * 
 *     @property
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
  for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
    __pyx_t_2 = __pyx_t_4;
    __pyx_v_length = (__pyx_t_2[0]);
    __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  }
  __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;

  /* "View.MemoryView":563
 * 
 *     @property
 *     def shape(self):             # <<<<<<<<<<<<<<
 *         return tuple([length for length in self.view.shape[:self.view.ndim]])
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
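/*
 * Editorial note (not part of the Cython output): the loop above is the
 * expanded form of the list comprehension in `def shape(self)` -- each
 * Py_ssize_t in self.view.shape[:ndim] is boxed with PyInt_FromSsize_t,
 * appended to a temporary list, and the list is converted with
 * PyList_AsTuple on the way out.
 */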
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def 
nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def 
size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def 
__repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; 
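/*
 * Editorial note, hedged (not part of the Cython output): the
 * get_slice_from_memview() call below either returns the cached
 * from_slice of a _memoryviewslice or fills the stack-allocated `tmp`
 * for a plain memoryview; `mslice` then points at whichever of the two
 * was used.
 */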
__Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
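/*
 * Editorial note (not part of the Cython output): is_c_contig and
 * is_f_contig differ only in the 'C' vs. 'F' order character passed to
 * the shared __pyx_memviewslice_is_contig() helper; its int result is
 * boxed into a Python bool with __Pyx_PyBool_FromLong above.
 */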
__pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * 
return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
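/* Editor's note: the "(tree fragment)" methods here are Cython's
 * standard pickling guard for extension types with a non-trivial
 * __cinit__: a memoryview cannot be reconstructed from pickled state,
 * so both methods unconditionally raise. Python-level equivalent:
 *     def __reduce_cython__(self):
 *         raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 *     def __setstate_cython__(self, __pyx_state):
 *         raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */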
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * 
@cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
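 * --- editor's note: pure-Python sketch of the body generated below,
 * reconstructed from the quoted Cython source (for reading only) ---
 *     def _unellipsify(index, ndim):
 *         tup = index if isinstance(index, tuple) else (index,)
 *         result, have_slices, seen_ellipsis = [], False, False
 *         for idx, item in enumerate(tup):
 *             if item is Ellipsis:
 *                 if not seen_ellipsis:
 *                     result.extend([slice(None)] * (ndim - len(tup) + 1))
 *                     seen_ellipsis = True
 *                 else:
 *                     result.append(slice(None))
 *                 have_slices = True
 *             else:
 *                 if not isinstance(item, slice) and not PyIndex_Check(item):
 *                     raise TypeError("Cannot index with type '%s'" % type(item))
 *                 have_slices = have_slices or isinstance(item, slice)
 *                 result.append(item)
 *         nslices = ndim - len(result)
 *         if nslices:
 *             result.extend([slice(None)] * nslices)
 *         return have_slices or nslices, tuple(result)
 * The first Ellipsis expands to (ndim - len(tup) + 1) full slices:
 * with ndim == 4, index (0, ..., 0) has len(tup) == 3, so Ellipsis
 * becomes 2 slices, yielding (0, slice(None), slice(None), 0).
 * ---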
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef 
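 * --- editor's note: a suboffset >= 0 marks a PEP 3118 "indirect"
 * dimension (each stored value is a pointer to be dereferenced, as in
 * PIL-style buffers); assert_direct_dimensions rejects such buffers up
 * front on code paths whose flat pointer arithmetic handles direct
 * dimensions only. ---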
assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if 
(unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) 
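/* Editor's note: this block is Cython's specialized compilation of
 *     for dim, index in enumerate(indices): ...
 * dim is kept in a plain C int, PyList_GET_ITEM / PyTuple_GET_ITEM
 * fast paths are taken when indices is exactly a list or tuple, and
 * any other iterable falls back to the generic tp_iternext protocol
 * with StopIteration swallowed to end the loop. */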
#else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = 
(__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * 
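 * --- editor's note: the bounds are extracted twice on purpose.
 * "index.start or 0" maps None to 0, but 0 is also a legitimate
 * explicit bound, so presence is tracked separately via
 * "index.start is not None" and handed to slice_memviewslice as the
 * have_{start,stop,step} flags; the real defaults (0 or shape-1 for
 * start, shape or -1 for stop, depending on the sign of step) are
 * applied there. ---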
have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* 
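 * --- editor's note: two return paths. When the source is itself a
 * _memoryviewslice (i.e. the product of an earlier slicing), its
 * to_object_func/to_dtype_func converters are propagated into the new
 * slice so object elements keep converting correctly; a plain
 * memoryview has no converters, hence NULL, NULL in the else branch.
 * ---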
"View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: 
* */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: 
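 * --- editor's note: these clamps mirror CPython's slice
 * normalization (compare PySlice_GetIndicesEx). For shape == 5:
 * start 10 with step -1 clamps to shape-1 == 4, so x[10:0:-1] visits
 * 4,3,2,1; start 10 with step +1 clamps to shape == 5, so x[10:] is
 * empty. ---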
* if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); 
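/* Editor's note: after step defaults to 1 below, the extent of the
 * sliced dimension is computed with C (truncating) division plus a
 * remainder fix-up, i.e. a ceiling division toward the step direction:
 *     new_shape = (stop - start) // step        # cdivision(True)
 *     if (stop - start) - step * new_shape:     # nonzero remainder
 *         new_shape += 1
 *     if new_shape < 0: new_shape = 0
 * e.g. start=0, stop=5, step=2: trunc(5/2) == 2, remainder 1, so
 * new_shape == 3 (elements 0, 2, 4); start=4, stop=-1, step=-1 gives
 * trunc(-5/-1) == 5 (elements 4,3,2,1,0). */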
if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] 
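 * --- editor's note: resolving an indirect dimension. If the indirect
 * dimension was fully indexed and no outer dimensions remain
 * (new_ndim == 0), the data pointer is chased immediately: dereference
 * the stored char* and add suboffset. If it was sliced instead, the
 * dereference must be deferred, so suboffset_dim[0] records the
 * dimension for the start*stride adjustment seen earlier; indexing an
 * indirect dimension after a sliced one is unsupported, hence the
 * IndexError below. ---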
+ suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except 
NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); 
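/*
 * Illustrative note (not emitted by Cython): the checks around this raise
 * implement Python's negative-index convention in plain C.  A minimal
 * sketch of the same normalization, with a hypothetical helper name:
 *
 *     static Py_ssize_t normalize_index(Py_ssize_t index, Py_ssize_t shape)
 *     {
 *         if (index < 0) {
 *             index += shape;      // e.g. index -1, shape 5  ->  4
 *             if (index < 0)
 *                 return -1;       // still negative: IndexError below
 *         }
 *         if (index >= shape)
 *             return -1;           // upper-bound check: IndexError below
 *         return index;
 *     }
 *
 * For shape == 5, index -1 maps to 4, while index -6 stays negative and
 * falls through to the IndexError being constructed here.
 */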
if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); 
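/*
 * Illustrative note (hypothetical caller, not generated code): because
 * pybuffer_index is declared `except NULL`, C callers detect failure from
 * the NULL return alone; the Python exception state was already set above.
 *
 *     char *itemp = __pyx_pybuffer_index(&view, bufp, i, 0);
 *     if (unlikely(!itemp))
 *         goto __pyx_L1_error;               // exception already set
 *     memcpy(&value, itemp, view.itemsize);  // safe: bounds were checked
 */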
__pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, 
((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; 
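/*
 * Illustrative sketch (hypothetical names, not the code Cython emits): for
 * a memoryview over C doubles, the conversion pair stored in
 * to_object_func / to_dtype_func behaves like:
 *
 *     static PyObject *double_to_object(char *itemp) {
 *         return PyFloat_FromDouble(*(double *) itemp);
 *     }
 *     static int double_from_object(char *itemp, PyObject *obj) {
 *         double value = PyFloat_AsDouble(obj);
 *         if (value == -1.0 && PyErr_Occurred())
 *             return 0;               // matches the `except 0` contract
 *         *(double *) itemp = value;
 *         return 1;
 *     }
 *
 * When both pointers are NULL, the generic struct-format path of the base
 * memoryview class handles the conversion instead.
 */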
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ 
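/*
 * Behavioural note (summary, not generated code): __reduce_cython__ and
 * __setstate_cython__ exist only to fail loudly.  A _memoryviewslice wraps
 * a raw C pointer plus per-dimension metadata that the default pickle
 * protocol cannot recreate, so from Python the failure surfaces roughly as:
 *
 *     >>> import pickle
 *     >>> pickle.dumps(sliced_view)   # any _memoryviewslice instance
 *     TypeError: no default __reduce__ due to non-trivial __cinit__
 */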
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef 
_memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data 
* result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> 
result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice 
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = 
memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ 
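/*
 * Background sketch (field names paraphrased; the authoritative definition
 * lives in the module preamble): slice_copy above flattens the Python-level
 * view into a plain C struct so later copy routines can run without the GIL:
 *
 *     typedef struct {
 *         struct __pyx_memoryview_obj *memview;  // owning object
 *         char *data;                            // base pointer
 *         Py_ssize_t shape[8];                   // extent per dimension
 *         Py_ssize_t strides[8];                 // byte step per dimension
 *         Py_ssize_t suboffsets[8];              // -1 == direct dimension
 *     } __Pyx_memviewslice;
 *
 * A suboffset >= 0 marks an indirect (pointer-chasing) dimension, which is
 * why slice_copy stores -1 for every dimension when the exporter supplies
 * no suboffsets array at all.
 */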
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * 
memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
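 *
 *     [illustrative addition, not part of the original docstring]
 *     The heuristic below compares the innermost and outermost significant
 *     strides, skipping extent-1 dimensions.  For a C-contiguous float64
 *     array of shape (3, 4) the strides are (32, 8): c_stride = 8 is not
 *     greater than f_stride = 32, so 'C' is returned.  A Fortran-ordered
 *     array of the same shape has strides (8, 24), giving 'F'.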
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given 
slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i 
in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t 
itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = 
stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* 
"View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; 
goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
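/* Note (editorial): if the callable was a bound method, __pyx_t_2 holds its
   self object, which must be re-supplied as the leading argument (Call2Args);
   in the common case __pyx_t_2 is NULL and a plain one-argument call is made. */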
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
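/* Note (editorial): a NULL result means copy_data_to_temp already recorded a
   Python exception (e.g. the MemoryError raised when its malloc fails), so
   this just takes the error exit. */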
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
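/* Note (editorial): this Python-level wrapper unpacks the (__pyx_type,
   __pyx_checksum, __pyx_state) triple from positional or keyword arguments,
   converts the checksum to a C long, and forwards to the implementation
   below, which rejects state pickled under an incompatible Enum layout. */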
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if 
__pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 
0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_array___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { 
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && 
!_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif 
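    /*
     * Note on the Cython-generated dealloc sequence that follows: after the
     * optional tp_finalize call above, the object is untracked from the GC,
     * then its Python-level __dealloc__ runs with the refcount temporarily
     * bumped (so the object cannot be torn down again mid-call) and any
     * in-flight exception preserved across the call via
     * PyErr_Fetch/PyErr_Restore; finally the owned members are cleared and
     * tp_free releases the memory.
     */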
PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryview___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", 
(PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, 
/*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryviewslice___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ 
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_nmf_pgd(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_nmf_pgd}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "nmf_pgd", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, 
__pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_WtW, __pyx_k_WtW, sizeof(__pyx_k_WtW), 0, 0, 1, 1}, {&__pyx_n_s_Wtv, __pyx_k_Wtv, sizeof(__pyx_k_Wtv), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_1, __pyx_k_component_idx_1, sizeof(__pyx_k_component_idx_1), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_2, __pyx_k_component_idx_2, sizeof(__pyx_k_component_idx_2), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_gensim_models_nmf_pgd, __pyx_k_gensim_models_nmf_pgd, sizeof(__pyx_k_gensim_models_nmf_pgd), 0, 0, 1, 1}, {&__pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_k_gensim_models_nmf_pgd_pyx, sizeof(__pyx_k_gensim_models_nmf_pgd_pyx), 0, 0, 1, 0}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_grad, __pyx_k_grad, sizeof(__pyx_k_grad), 0, 0, 1, 1}, {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, {&__pyx_n_s_hessian, __pyx_k_hessian, sizeof(__pyx_k_hessian), 0, 0, 1, 1}, 
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_kappa, __pyx_k_kappa, sizeof(__pyx_k_kappa), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_n_components, __pyx_k_n_components, sizeof(__pyx_k_n_components), 0, 0, 1, 1}, {&__pyx_n_s_n_samples, __pyx_k_n_samples, sizeof(__pyx_k_n_samples), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_permutation, __pyx_k_permutation, sizeof(__pyx_k_permutation), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_projected_grad, __pyx_k_projected_grad, sizeof(__pyx_k_projected_grad), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_sample_idx, __pyx_k_sample_idx, sizeof(__pyx_k_sample_idx), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_solve_h, __pyx_k_solve_h, sizeof(__pyx_k_solve_h), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, 
sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_violation, __pyx_k_violation, sizeof(__pyx_k_violation), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 44, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if 
(unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 
577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
* */ __pyx_tuple__19 = PyTuple_Pack(14, __pyx_n_s_h, __pyx_n_s_Wtv, __pyx_n_s_WtW, __pyx_n_s_permutation, __pyx_n_s_kappa, __pyx_n_s_n_components, __pyx_n_s_n_samples, __pyx_n_s_violation, __pyx_n_s_grad, __pyx_n_s_projected_grad, __pyx_n_s_hessian, __pyx_n_s_sample_idx, __pyx_n_s_component_idx_1, __pyx_n_s_component_idx_2); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(5, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_n_s_solve_h, 18, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 18, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) 
__PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if 
(__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ 
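    /* This module imports no external extension types, so this generated
       hook is a no-op; it only balances the RefNanny context set up above. */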
__Pyx_RefNannyFinishContext();
    return 0;
}

static int __Pyx_modinit_variable_import_code(void) {
    __Pyx_RefNannyDeclarations
    __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
    /*--- Variable import code ---*/
    __Pyx_RefNannyFinishContext();
    return 0;
}

static int __Pyx_modinit_function_import_code(void) {
    __Pyx_RefNannyDeclarations
    __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
    /*--- Function import code ---*/
    __Pyx_RefNannyFinishContext();
    return 0;
}

#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif

#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initnmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initnmf_pgd(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
    return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
    #if PY_VERSION_HEX >= 0x030700A1
    static PY_INT64_T main_interpreter_id = -1;
    PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
    if (main_interpreter_id == -1) {
        main_interpreter_id = current_id;
        return (unlikely(current_id == -1)) ? -1 : 0;
    } else if (unlikely(main_interpreter_id != current_id))
    #else
    static PyInterpreterState *main_interpreter = NULL;
    PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
    if (!main_interpreter) {
        main_interpreter = current_interpreter;
    } else if (unlikely(main_interpreter != current_interpreter))
    #endif
    {
        PyErr_SetString(
            PyExc_ImportError,
            "Interpreter change detected - this module can only be loaded into one interpreter per process.");
        return -1;
    }
    return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
    PyObject *value = PyObject_GetAttrString(spec, from_name);
    int result = 0;
    if (likely(value)) {
        if (allow_none || value != Py_None) {
            result = PyDict_SetItemString(moddict, to_name, value);
        }
        Py_DECREF(value);
    } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Clear();
    } else {
        result = -1;
    }
    return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
    PyObject *module = NULL, *moddict, *modname;
    if (__Pyx_check_single_interpreter())
        return NULL;
    if (__pyx_m)
        return __Pyx_NewRef(__pyx_m);
    modname = PyObject_GetAttrString(spec, "name");
    if (unlikely(!modname)) goto bad;
    module = PyModule_NewObject(modname);
    Py_DECREF(modname);
    if (unlikely(!module)) goto bad;
    moddict = PyModule_GetDict(module);
    if (unlikely(!moddict)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
    return module;
bad:
    Py_XDECREF(module);
    return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_nmf_pgd(PyObject *__pyx_pyinit_module)
#endif
#endif
{
    PyObject *__pyx_t_1 = NULL;
    static
PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'nmf_pgd' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("nmf_pgd", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_gensim__models__nmf_pgd) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "gensim.models.nmf_pgd")) { if (unlikely(PyDict_SetItemString(modules, "gensim.models.nmf_pgd", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h, NULL, __pyx_n_s_gensim_models_nmf_pgd); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_solve_h, __pyx_t_1) < 0) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "gensim/models/nmf_pgd.pyx":1 * # Author: Timofey Yefimov # <<<<<<<<<<<<<< * * # cython: cdivision=True */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); 
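  /* NOTE (editor): each of the module-level `cdef` Enum singletons created
     here and below follows the same pattern: call the MemviewEnum type with
     an interned argument tuple, then swap the result into the C global.
     __Pyx_XGOTREF / __Pyx_DECREF_SET / __Pyx_GIVEREF are the refnanny-tracked
     equivalent of a plain `tmp = global; global = new; Py_XDECREF(tmp);`. */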
__pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return 
self.from_object
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)"));
  if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  PyType_Modified(__pyx_memoryviewslice_type);

  /* "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
 *     cdef object __pyx_PickleError
 *     cdef object __pyx_result
 */
  __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView);
  if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "(tree fragment)":11
 *     __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
 *     __pyx_result.name = __pyx_state[0]
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 */

  /*--- Wrapped vars code ---*/

  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  if (__pyx_m) {
    if (__pyx_d) {
      __Pyx_AddTraceback("init gensim.models.nmf_pgd", __pyx_clineno, __pyx_lineno, __pyx_filename);
    }
    Py_CLEAR(__pyx_m);
  } else if (!PyErr_Occurred()) {
    PyErr_SetString(PyExc_ImportError, "init gensim.models.nmf_pgd");
  }
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  return (__pyx_m != NULL) ?
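  /* NOTE (editor): three return conventions are selected below: a PEP 489
     exec slot reports success as an int (0 or -1), a classic Python 3 init
     function returns the module object, and a Python 2 init function
     returns void. */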
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
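/* NOTE (editor): __pyx_fatalerror() formats its message into a fixed
   200-byte buffer and hands it to Py_FatalError(), which never returns.
   It is used by the memoryview refcounting helpers below when an
   acquisition count is found corrupted -- a state that cannot safely be
   reported as an ordinary Python exception. */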
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; 
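    /* NOTE (editor): classic swap pattern -- save the old exception
       pointers, install the new ones, and only then drop the old
       references, so the thread state never points at freed objects even
       if a decref triggers arbitrary cleanup code. */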
tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto 
bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if 
(likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (__Pyx_PyFastCFunction_Check(func)) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = 
__Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
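/* NOTE (editor): __Pyx_GetAttr3() is the C-level analogue of the
   three-argument getattr(o, n, d): attempt the lookup and fall back to the
   default only when the failure was an AttributeError;
   __Pyx_GetAttr3Default() above returns NULL without clearing the error
   for anything else, so other exceptions keep propagating. */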
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                default:
                    return PyLong_Type.tp_as_number->nb_add(op1, op2);
            }
        }
        x = a + b;
        return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
    long_long:
        llx = lla + llb;
        return PyLong_FromLongLong(llx);
#endif
    }
#endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = PyFloat_AS_DOUBLE(op1);
        double result;
        PyFPE_START_PROTECT("add", return NULL)
        result = ((double)a) + (double)b;
        PyFPE_END_PROTECT(result)
        return PyFloat_FromDouble(result);
    }
    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
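/*
 * Illustrative note (ours): the Python 2 fast path in __Pyx_PyInt_AddObjC
 * above detects signed overflow without undefined behaviour by doing the
 * addition in unsigned arithmetic (well defined) and then checking signs:
 * a + b overflowed iff the result differs in sign from *both* operands.
 * A hypothetical standalone version of that test:
 */
static CYTHON_UNUSED int __pyx_demo_add_overflowed(long a, long b) {
    long x = (long)((unsigned long)a + (unsigned long)b);
    return !((x ^ a) >= 0 || (x ^ b) >= 0);  /* 1 iff a + b overflowed a long */
}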
/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}

/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
            "cannot import name %.230s", PyString_AS_STRING(name));
#else
            "cannot import name %S", name);
#endif
    }
    return value;
}

/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *r;
    if (unlikely(!__Pyx_PyBaseString_Check(n))) {
        PyErr_SetString(PyExc_TypeError,
                        "hasattr(): attribute name must be string");
        return -1;
    }
    r = __Pyx_GetAttr(o, n);
    if (unlikely(!r)) {
        PyErr_Clear();
        return 0;
    } else {
        Py_DECREF(r);
        return 1;
    }
}

/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
                 "'%.50s' object has no attribute '%U'",
                 tp->tp_name, attr_name);
#else
                 "'%.50s' object has no attribute '%.400s'",
                 tp->tp_name, PyString_AS_STRING(attr_name));
#endif
    return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
    PyObject *descr;
    PyTypeObject *tp = Py_TYPE(obj);
    if (unlikely(!PyString_Check(attr_name))) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    assert(!tp->tp_dictoffset);
    descr = _PyType_Lookup(tp, attr_name);
    if (unlikely(!descr)) {
        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
    }
    Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
    if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
    {
        descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
        if (unlikely(f)) {
            PyObject *res = f(descr, obj, (PyObject *)tp);
            Py_DECREF(descr);
            return res;
        }
    }
    return descr;
}
#endif

/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
    if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif

/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}
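/*
 * Illustrative sketch (ours, not generated code): the vtable is smuggled
 * through the type dict as an unnamed capsule (or a PyCObject before
 * Python 2.7, which is why both branches exist above), so a reader side
 * simply unwraps the pointer again.  The helper name is hypothetical.
 */
static CYTHON_UNUSED void *__pyx_demo_unwrap_vtable(PyObject *ob) {
#if PY_VERSION_HEX >= 0x02070000
    return PyCapsule_GetPointer(ob, 0);  /* NULL name matches PyCapsule_New(vtable, 0, 0) */
#else
    return PyCObject_AsVoidPtr(ob);
#endif
}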
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        __Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
    PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
        return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
    }
#endif
    result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
    if (unlikely(!result)) {
        __Pyx_PyObject_GetAttrStr_ClearAttributeError();
    }
    return result;
}

/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    int ret;
    PyObject *name_attr;
    name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
    if (likely(name_attr)) {
        ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
    } else {
        ret = -1;
    }
    if (unlikely(ret < 0)) {
        PyErr_Clear();
        ret = 0;
    }
    Py_XDECREF(name_attr);
    return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
    int ret = 0;
    PyObject *object_reduce = NULL;
    PyObject *object_reduce_ex = NULL;
    PyObject *reduce = NULL;
    PyObject *reduce_ex = NULL;
    PyObject *reduce_cython = NULL;
    PyObject *setstate = NULL;
    PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
    if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
    if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex);
    if (!object_reduce_ex) goto __PYX_BAD;
#else
    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex);
    if (!object_reduce_ex) goto __PYX_BAD;
#endif
    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex);
    if (unlikely(!reduce_ex)) goto __PYX_BAD;
    if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
        object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce);
        if (!object_reduce) goto __PYX_BAD;
#else
        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce);
        if (!object_reduce) goto __PYX_BAD;
#endif
        reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce);
        if (unlikely(!reduce)) goto __PYX_BAD;
        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
            reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
            if (likely(reduce_cython)) {
                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython);
                if (unlikely(ret < 0)) goto __PYX_BAD;
                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython);
                if (unlikely(ret < 0)) goto __PYX_BAD;
            } else if (reduce == object_reduce || PyErr_Occurred()) {
                goto __PYX_BAD;
            }
            setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
            if (!setstate) PyErr_Clear();
            if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
                setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
                if (likely(setstate_cython)) {
                    ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython);
                    if (unlikely(ret < 0)) goto __PYX_BAD;
                    ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython);
                    if (unlikely(ret < 0)) goto __PYX_BAD;
                } else if (!setstate || PyErr_Occurred()) {
                    goto __PYX_BAD;
                }
            }
            PyType_Modified((PyTypeObject*)type_obj);
        }
    }
    goto __PYX_GOOD;
__PYX_BAD:
    if (!PyErr_Occurred())
        PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
    ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
    Py_XDECREF(object_reduce);
    Py_XDECREF(object_reduce_ex);
#endif
    Py_XDECREF(reduce);
    Py_XDECREF(reduce_ex);
    Py_XDECREF(reduce_cython);
    Py_XDECREF(setstate);
    Py_XDECREF(setstate_cython);
    return ret;
}
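/*
 * Note (ours): __Pyx_setup_reduce() post-processes an extension type so that
 * pickling works.  Unless the class provides its own __getstate__, the
 * generated __reduce_cython__/__setstate_cython__ helpers are renamed into
 * the real __reduce__/__setstate__ entries of tp_dict, and PyType_Modified()
 * is called afterwards so the type's method cache is flushed.
 */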
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
    PyObject *use_cline;
    PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict;
#endif
    if (unlikely(!__pyx_cython_runtime)) {
        return c_line;
    }
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, *cython_runtime_dict,
            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
    } else
#endif
    {
        PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
        if (use_cline_obj) {
            use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
            Py_DECREF(use_cline_obj);
        } else {
            PyErr_Clear();
            use_cline = NULL;
        }
    }
    if (!use_cline) {
        c_line = 0;
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif

/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
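/*
 * Illustrative sketch (ours, not generated code): cache entries are kept
 * sorted by code_line, so an insert followed by a lookup of the same key
 * hands back the cached object with a fresh reference.  The cache keeps its
 * own reference, so the entry stays alive after we drop ours.  The line key
 * below is hypothetical.
 */
static CYTHON_UNUSED void __pyx_demo_code_cache_roundtrip(PyCodeObject *code) {
    __pyx_insert_code_object(1234, code);        /* cache takes its own reference */
    {
        PyCodeObject *hit = __pyx_find_code_object(1234);
        Py_XDECREF(hit);                         /* find returned a new reference */
    }
}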
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
#else
    py_srcfile = PyUnicode_FromString(filename);
#endif
    if (!py_srcfile) goto bad;
    if (c_line) {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
    }
    else {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
#else
        py_funcname = PyUnicode_FromString(funcname);
#endif
    }
    if (!py_funcname) goto bad;
    py_code = __Pyx_PyCode_New(
        0, 0, 0, 0, 0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,        /*PyObject *filename,*/
        py_funcname,       /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,   /*PyThreadState *tstate,*/
        py_code,  /*PyCodeObject *code,*/
        __pyx_d,  /*PyObject *globals,*/
        0         /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
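/*
 * Note (ours): C lines and Python lines share the code-object cache above,
 * so cache keys for C lines are negated ("-c_line") to keep them from
 * colliding with Python line numbers of the same value.
 */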
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
    if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
    if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *obj = view->obj;
    if (!obj) return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    if ((0)) {}
    view->obj = NULL;
    Py_DECREF(obj);
}
#endif

/* MemviewSliceIsContig */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) {
    int i, index, step, start;
    Py_ssize_t itemsize = mvs.memview->view.itemsize;
    if (order == 'F') {
        step = 1;
        start = 0;
    } else {
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        itemsize *= mvs.shape[index];
    }
    return 1;
}

/* OverlappingSlices */
static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                                           void **out_start, void **out_end,
                                           int ndim, size_t itemsize) {
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    *out_end = end + itemsize;
}
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
                                __Pyx_memviewslice *slice2,
                                int ndim, size_t itemsize) {
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}

/* Capsule */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) {
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}

/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t u32;
        uint8_t u8[4];
    } S;
    S.u32 = 0x01020304;
    return S.u8[0] == 4;
}

/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if
(*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field;
                continue;
            } else if (field->type->typegroup == 'S') {
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                if (field->type->fields->type == NULL) continue;
                field = field->type->fields;
                ++ctx->head;
                ctx->head->field = field;
                ctx->head->parent_offset = parent_offset;
                break;
            } else {
                break;
            }
        }
    } while (ctx->enc_count);
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number, ndim;
    ++ts;
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    ndim = ctx->head->field->type->ndim;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* advance past whitespace; a bare "continue" here would never
               consume the character and would loop forever */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  ++ts; continue;
            default:  break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                                "Expected a dimension of size %zu, got %d",
                                ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
    int got_Z = 0;
    while (1) {
        switch(*ts) {
            case 0:
                if (ctx->enc_type != 0 && ctx->head == NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                if (ctx->head != NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                return ts;
            case ' ':
            case '\r':
            case '\n':
                ++ts;
                break;
            case '<':
                if (!__Pyx_Is_Little_Endian()) {
                    PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '>':
            case '!':
                if (__Pyx_Is_Little_Endian()) {
                    PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '=':
            case '@':
            case '^':
                ctx->new_packmode = *ts++;
                break;
            case 'T':
                {
                    const char* ts_after_sub;
                    size_t i, struct_count = ctx->new_count;
                    size_t struct_alignment = ctx->struct_alignment;
                    ctx->new_count = 1;
                    ++ts;
                    if (*ts != '{') {
                        PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
                        return NULL;
                    }
                    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                    ctx->enc_type = 0;
                    ctx->enc_count = 0;
                    ctx->struct_alignment = 0;
                    ++ts;
                    ts_after_sub = ts;
                    for (i = 0; i != struct_count; ++i) {
                        ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
                        if (!ts_after_sub) return NULL;
                    }
                    ts = ts_after_sub;
                    if (struct_alignment) ctx->struct_alignment = struct_alignment;
                }
                break;
            case '}':
                {
                    size_t alignment = ctx->struct_alignment;
                    ++ts;
                    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                    ctx->enc_type = 0;
                    if (alignment && ctx->fmt_offset % alignment) {
                        ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
                    }
                }
                return ts;
            case 'x':
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->fmt_offset += ctx->new_count;
                ctx->new_count = 1;
                ctx->enc_count = 0;
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && 
buf->suboffsets[dim] >= 0)) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer not compatible with direct access "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    if (spec & __Pyx_MEMVIEW_PTR) {
        if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer is not indirectly accessible "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int i;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        Py_ssize_t stride = 1;
        for (i = 0; i < ndim; i++) {
            if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        Py_ssize_t stride = 1;
        for (i = ndim - 1; i > -1; i--) {
            if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    }
    return 1;
fail:
    return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                      original_obj)->typeinfo)) {
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                        original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (unlikely(buf->ndim != ndim)) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer has wrong number of dimensions (expected %d, got %d)",
                     ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
    }
    if (unlikely((unsigned) buf->itemsize != dtype->size)) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ?
"s" : ""); goto fail; } if (buf->len > 0) { for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; } if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy 
memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } 
} break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 
* sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned 
long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * 
PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return 
PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const char neg_one = (char) -1, const_zero = (char) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * 
sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, 
!is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && 
defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
GB_unaryop__ainv_int16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_int16_uint16
// op(A') function: GB_tran__ainv_int16_uint16

// C type:   int16_t
// A type:   uint16_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_int16_uint16
(
    int16_t *Cx,            // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_int16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
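For reference outside the library, the apply kernel above boils down to a cast-then-negate loop over the array. A minimal stand-alone sketch of the same computation (hypothetical harness; the real entry point is reached through GraphBLAS and its generated macros):

/* Stand-alone model of Cx = op(cast(Ax)) for AINV with uint16 input and
 * int16 output: cij = -(int16_t)aij.  Mirrors the generated loop above. */
#include <stdint.h>
#include <stdio.h>

static void ainv_int16_uint16(int16_t *Cx, const uint16_t *Ax, int64_t anz)
{
    /* pragma has effect only when compiled with OpenMP enabled */
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        int16_t z = (int16_t) Ax[p];  /* cast (wraps for values > INT16_MAX) */
        Cx[p] = -z;                   /* unary op */
    }
}

int main(void)
{
    uint16_t a[3] = { 1, 2, 65535 };
    int16_t c[3];
    ainv_int16_uint16(c, a, 3);
    printf("%d %d %d\n", c[0], c[1], c[2]); /* -1 -2 1 on two's-complement targets */
    return 0;
}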
objective_function.h
#pragma once
#ifndef OPTIMIZATION_LIB_OBJECTIVE_FUNCTION_H
#define OPTIMIZATION_LIB_OBJECTIVE_FUNCTION_H

// STL Includes
#include <memory>
#include <atomic>
#include <string>
#include <mutex>
#include <any>
#include <limits>
#include <cmath>

// Eigen Includes
#include <Eigen/Core>
#include <Eigen/Sparse>

// Optimization Lib Includes
#include "../core/core.h"
#include "../core/updatable_object.h"
#include "./objective_function_base.h"
#include "../data_providers/data_provider.h"

template<Eigen::StorageOptions StorageOrder_, typename VectorType_>
class ObjectiveFunction : public ObjectiveFunctionBase
{
public:
	/**
	 * Public type definitions
	 */
	enum TemplateSettings
	{
		StorageOrder = StorageOrder_
	};

	/**
	 * Constructor and destructor
	 */
	ObjectiveFunction(const std::shared_ptr<MeshDataProvider>& mesh_data_provider, const std::shared_ptr<DataProvider>& data_provider, const std::string& name) :
		ObjectiveFunctionBase(mesh_data_provider),
		f_(0),
		w_(1),
		name_(name),
		data_provider_(data_provider)
	{
		if (std::dynamic_pointer_cast<EmptyDataProvider>(data_provider_) == nullptr)
		{
			this->dependencies_.push_back(data_provider_);
		}
	}

	virtual ~ObjectiveFunction()
	{
		// Empty implementation
	}

	/**
	 * Getters
	 */
	double GetValue() const
	{
		return f_;
	}

	const VectorType_& GetValuePerVertex() const
	{
		return f_per_vertex_;
	}

	const Eigen::VectorXd& GetImageValuePerEdge() const
	{
		return image_value_per_edge_;
	}

	const Eigen::VectorXd& GetDomainValuePerEdge() const
	{
		return domain_value_per_edge_;
	}

	const VectorType_& GetGradient() const
	{
		return g_;
	}

	const Eigen::SparseMatrix<double, StorageOrder_>& GetHessian()
	{
		H_.setFromTriplets(triplets_.begin(), triplets_.end());
		return H_;
	}

	const std::vector<Eigen::Triplet<double>>& GetTriplets() const
	{
		return triplets_;
	}

	double GetWeight() const
	{
		return w_;
	}

	std::string GetName() const
	{
		return name_;
	}

	std::shared_ptr<DataProvider> GetDataProvider() const
	{
		return data_provider_;
	}

	// Generic property getter
	virtual bool GetProperty(const int32_t property_id, const int32_t property_modifier_id, const std::any property_context, std::any& property_value) override
	{
		const ObjectiveFunctionBase::PropertyModifiers property_modifiers = static_cast<ObjectiveFunctionBase::PropertyModifiers>(property_modifier_id);
		const Properties properties = static_cast<Properties>(property_id);
		switch (properties)
		{
		case Properties::Value:
			property_value = GetValue();
			return true;
		case Properties::ValuePerVertex:
			property_value = GetValuePerVertex();
			return true;
		case Properties::ValuePerEdge:
			property_value = GetValuePerEdge(property_modifiers);
			return true;
		case Properties::Gradient:
			property_value = GetGradient();
			return true;
		case Properties::Hessian:
			property_value = GetHessian();
			return true;
		case Properties::GradientNorm:
			property_value = GetGradient().norm();
			return true;
		case Properties::Weight:
			property_value = GetWeight();
			return true;
		case Properties::Name:
			property_value = GetName();
			return true;
		}

		return false;
	}

	/**
	 * Setters
	 */
	void SetWeight(const double w)
	{
		w_ = w;
	}

	// Generic property setter
	virtual bool SetProperty(const int32_t property_id, const std::any property_context, const std::any property_value) override
	{
		const Properties properties = static_cast<Properties>(property_id);
		switch (properties)
		{
		case Properties::Weight:
			SetWeight(std::any_cast<const double&>(property_value));
			return true;
		}

		return false;
	}

	/**
	 * Public methods
	 */

	// Initializes the objective function object. Must be called from any derived class constructor.
	void Initialize()
	{
		std::lock_guard<std::mutex> lock(mutex_);
		PreInitialize();
		InitializeValue(f_);
		InitializeValuePerVertex(f_per_vertex_);
		InitializeValuePerEdge(image_value_per_edge_, domain_value_per_edge_);
		InitializeGradient(g_);
		InitializeHessian(H_);
		InitializeTriplets(triplets_);
		PostInitialize();
		UpdatableObject::Initialize();
	}

	// Update value, gradient and hessian for a given x
	void Update(const Eigen::VectorXd& x) override
	{
		Update(x, static_cast<int32_t>(UpdateOptions::All));
	}

	void Update(const Eigen::VectorXd& x, const int32_t update_modifiers) override
	{
		PreUpdate(x);

		const UpdateOptions update_options = static_cast<UpdateOptions>(update_modifiers);
		if ((update_options & UpdateOptions::Value) != UpdateOptions::None)
		{
			CalculateValue(f_);
		}

		if ((update_options & UpdateOptions::ValuePerVertex) != UpdateOptions::None)
		{
			CalculateValuePerVertex(f_per_vertex_);
		}

		if ((update_options & UpdateOptions::ValuePerEdge) != UpdateOptions::None)
		{
			CalculateValuePerEdge(domain_value_per_edge_, image_value_per_edge_);
		}

		if ((update_options & UpdateOptions::Gradient) != UpdateOptions::None)
		{
			CalculateGradient(g_);
		}

		if ((update_options & UpdateOptions::Hessian) != UpdateOptions::None)
		{
			CalculateTriplets(triplets_);
		}

		PostUpdate(x);
	}

	void UpdateLayers(const Eigen::VectorXd& x)
	{
		UpdateLayers(x, UpdateOptions::All);
	}

	void UpdateLayers(const Eigen::VectorXd& x, const UpdateOptions update_options)
	{
		std::lock_guard<std::mutex> lock(mutex_);
		int32_t update_modifiers = static_cast<int32_t>(update_options);
		const auto layers_count = dependency_layers_.size();
		for (std::size_t current_layer_index = 0; current_layer_index < layers_count; current_layer_index++)
		{
			const auto& current_layer = dependency_layers_[current_layer_index];
			const auto objects_count = current_layer.size();

			// HACK: Remove this 'if' branch once Separation and Symmetric Dirichlet
			// objectives are composed as sum of sub-objectives
			if (current_layer_index == 0)
			{
				#pragma omp parallel for
				for (long i = 1; i < static_cast<long>(objects_count); i++)
				{
					current_layer[i]->Update(x, update_modifiers);
				}

				if (objects_count > 0)
				{
					current_layer[0]->Update(x, update_modifiers);
				}

				//if (objects_count > 1)
				//{
				//	current_layer[1]->Update(x, update_modifiers);
				//}
			}
			else
			{
				#pragma omp parallel for
				for (long i = 0; i < static_cast<long>(objects_count); i++)
				{
					current_layer[i]->Update(x, update_modifiers);
				}
			}
		}

		Update(x, update_modifiers);
	}

	template<typename ValueVectorType_>
	void AddValuePerVertex(ValueVectorType_& f_per_vertex, const double w = 1) const
	{
		f_per_vertex = f_per_vertex + w * f_per_vertex_;
	}

	template<typename GradientVectorType_>
	void AddGradient(GradientVectorType_& g, const double w = 1) const
	{
		g = g + w * g_;
	}

	template<typename ValueVectorType_>
	void AddValuePerVertexSafe(ValueVectorType_& f_per_vertex, const double w = 1) const
	{
		AddValuePerVertex(f_per_vertex, w);
	}

	template<typename GradientVectorType_>
	void AddGradientSafe(GradientVectorType_& g, const double w = 1) const
	{
		AddGradient(g, w);
	}

	virtual void AddTriplets(std::vector<Eigen::Triplet<double>>& triplets, const double w = 1) const
	{
		const int64_t start_index = triplets.size();
		const int64_t end_index = start_index + triplets_.size();
		triplets.insert(triplets.end(), triplets_.begin(), triplets_.end());
		for (int64_t i = start_index; i < end_index; i++)
		{
			double& value = const_cast<double&>(triplets[i].value());
			value *= w;
		}
	}

	/**
	 * Gradient and hessian approximation using finite differences
	 */

	// NOTE: The template parameters are named StorageOrder2_ and VectorType2_
	// because a member template may not redeclare (shadow) the enclosing class
	// template's parameter names.
	template<Eigen::StorageOptions StorageOrder2_, typename VectorType2_>
	static Eigen::VectorXd GetApproximatedGradient(const std::shared_ptr<ObjectiveFunction<StorageOrder2_, VectorType2_>>& objective_function, const Eigen::VectorXd& x)
	{
		Eigen::VectorXd g(x.rows());
		g.setZero();

		Eigen::VectorXd perturbation(x.rows());
		const double epsilon = CalculateEpsilon(x);
		const double epsilon2 = 2 * epsilon;
		for (int64_t i = 0; i < x.rows(); i++)
		{
			perturbation.setZero();
			perturbation.coeffRef(i) = epsilon;
			Eigen::VectorXd x_plus_eps = x + perturbation;
			Eigen::VectorXd x_minus_eps = x - perturbation;
			objective_function->UpdateLayers(x_plus_eps);
			const double value_plus = objective_function->GetValue();
			objective_function->UpdateLayers(x_minus_eps);
			const double value_minus = objective_function->GetValue();
			g.coeffRef(i) = (value_plus - value_minus) / epsilon2;
		}

		return g;
	}

	template<Eigen::StorageOptions StorageOrder2_, typename VectorType2_>
	static Eigen::MatrixXd GetApproximatedHessian(const std::shared_ptr<ObjectiveFunction<StorageOrder2_, VectorType2_>>& objective_function, const Eigen::VectorXd& x)
	{
		Eigen::MatrixXd H(x.rows(), x.rows());
		H.setZero();

		Eigen::VectorXd perturbation(x.rows());
		const double epsilon = CalculateEpsilon(x);
		const double epsilon2 = 2 * epsilon;
		for (int64_t i = 0; i < x.rows(); i++)
		{
			perturbation.setZero();
			perturbation.coeffRef(i) = epsilon;
			Eigen::VectorXd x_plus_eps = x + perturbation;
			Eigen::VectorXd x_minus_eps = x - perturbation;
			objective_function->UpdateLayers(x_plus_eps);
			const Eigen::VectorXd g_plus = objective_function->GetGradient();
			objective_function->UpdateLayers(x_minus_eps);
			const Eigen::VectorXd g_minus = objective_function->GetGradient();
			H.row(i) = (g_plus - g_minus) / epsilon2;
		}

		return H;
	}

protected:
	/**
	 * Protected methods
	 */
	virtual void InitializeDependencyLayers()
	{

	}

	virtual void PreInitialize()
	{
		// Empty implementation
	}

	virtual void PostInitialize()
	{
		// Empty implementation
	}

	virtual void PreUpdate(const Eigen::VectorXd& x)
	{
		// Empty implementation
	}

	virtual void PostUpdate(const Eigen::VectorXd& x)
	{
		// Empty implementation
	}

	/**
	 * Protected fields
	 */

	// Mutex
	mutable std::mutex m_;

	// Data provider
	std::shared_ptr<DataProvider> data_provider_;

private:
	/**
	 * Private getters
	 */
	const Eigen::VectorXd& GetValuePerEdge(const ObjectiveFunctionBase::PropertyModifiers property_modifiers) const
	{
		switch (property_modifiers)
		{
		case ObjectiveFunctionBase::PropertyModifiers::Domain:
			return GetDomainValuePerEdge();
		case ObjectiveFunctionBase::PropertyModifiers::Image:
			return GetImageValuePerEdge();
		}

		// Fall back to the domain values for unexpected modifiers, so control
		// never flows off the end of a value-returning function (undefined behavior)
		return GetDomainValuePerEdge();
	}

	/**
	 * Private methods
	 */

	// Value, gradient and hessian initializers
	void InitializeValue(double& f)
	{
		f = 0;
	}

	void InitializeValuePerVertex(VectorType_& f_per_vertex)
	{
		f_per_vertex.resize(mesh_data_provider_->GetImageVerticesCount());
		f_per_vertex.setZero();
	}

	void InitializeValuePerEdge(Eigen::VectorXd& image_value_per_edge, Eigen::VectorXd& domain_value_per_edge)
	{
		domain_value_per_edge.resize(mesh_data_provider_->GetDomainEdgesCount());
		domain_value_per_edge.setZero();
		image_value_per_edge.resize(mesh_data_provider_->GetImageEdgesCount());
		image_value_per_edge.setZero();
	}

	void InitializeGradient(VectorType_& g)
	{
		g.resize(mesh_data_provider_->GetVariablesCount());
		g.setZero();
	}

	void InitializeHessian(Eigen::SparseMatrix<double, StorageOrder_>& H)
	{
		auto variables_count = mesh_data_provider_->GetVariablesCount();
		H.resize(variables_count, variables_count);
	}

	virtual void InitializeTriplets(std::vector<Eigen::Triplet<double>>& triplets) = 0;

	// Value, gradient and hessian calculation functions
	virtual void CalculateValue(double& f) = 0;
	virtual void CalculateValuePerVertex(VectorType_& f_per_vertex) = 0;

	// TODO: Should be moved to a new class that will be a super class for edge related objective functions
	virtual void CalculateValuePerEdge(Eigen::VectorXd& domain_value_per_edge, Eigen::VectorXd& image_value_per_edge) = 0;

	virtual void CalculateGradient(VectorType_& g) = 0;
	virtual void CalculateTriplets(std::vector<Eigen::Triplet<double>>& triplets) = 0;

	// Epsilon calculation for finite differences
	static double CalculateEpsilon(const Eigen::VectorXd& x)
	{
		const double machine_epsilon = std::numeric_limits<double>::epsilon();
		const double max = x.cwiseAbs().maxCoeff();
		return std::cbrt(machine_epsilon) * max;
	}

	/**
	 * Private fields
	 */

	// Mutex
	mutable std::mutex mutex_;

	// Value
	double f_;

	// Value per vertex
	VectorType_ f_per_vertex_;

	// Value per edge
	// TODO: Use generic VectorType_
	Eigen::VectorXd image_value_per_edge_;
	Eigen::VectorXd domain_value_per_edge_;

	// Gradient
	VectorType_ g_;

	// Triplets
	std::vector<Eigen::Triplet<double>> triplets_;

	// Hessian
	Eigen::SparseMatrix<double, StorageOrder_> H_;

	// Weight
	double w_;

	// Name
	const std::string name_;
};

#endif
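The finite-difference helpers above use central differences with step h = cbrt(machine epsilon) * max|x_i|, a standard choice that balances truncation error against floating-point round-off for first derivatives. A self-contained sketch of the same scheme on a scalar test function (illustrative only; it does not use the class above):

// Minimal central-difference gradient check, mirroring CalculateEpsilon():
// h = cbrt(machine epsilon) * max|x|.  Illustrative sketch only.
#include <Eigen/Core>
#include <cmath>
#include <iostream>
#include <limits>

static double f(const Eigen::VectorXd& x) { return 0.5 * x.squaredNorm(); } // analytic grad = x

int main()
{
	Eigen::VectorXd x(3);
	x << 1.0, -2.0, 3.0;
	const double h = std::cbrt(std::numeric_limits<double>::epsilon()) * x.cwiseAbs().maxCoeff();
	Eigen::VectorXd g(x.size());
	for (Eigen::Index i = 0; i < x.size(); ++i)
	{
		Eigen::VectorXd e = Eigen::VectorXd::Zero(x.size());
		e(i) = h;
		g(i) = (f(x + e) - f(x - e)) / (2.0 * h); // central difference
	}
	std::cout << (g - x).norm() << "\n"; // should be ~0
	return 0;
}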
8091.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"

/* Array initialization. */
static void init_array(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nk; j++)
      A[i][j] = ((DATA_TYPE) i*j) / ni;
  for (i = 0; i < nk; i++)
    for (j = 0; j < nj; j++)
      B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
  for (i = 0; i < nj; i++)
    for (j = 0; j < nm; j++)
      C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
  for (i = 0; i < nm; i++)
    for (j = 0; j < nl; j++)
      D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int ni, int nl,
		DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
      if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
		DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;

#pragma scop
  {
    /* j and k must be privatized explicitly: the distribute construct only
       privatizes the loop variable i, and sharing j/k across teams would be
       a data race. */
    /* E := A*B */
    #pragma omp target teams distribute thread_limit(256) private(j, k)
    for (i = 0; i < _PB_NI; i++) {
      for (j = 0; j < _PB_NJ; j++) {
        E[i][j] = 0;
        for (k = 0; k < _PB_NK; ++k)
          E[i][j] += A[i][k] * B[k][j];
      }
    }

    /* F := C*D */
    #pragma omp target teams distribute thread_limit(256) private(j, k)
    for (i = 0; i < _PB_NJ; i++) {
      for (j = 0; j < _PB_NL; j++) {
        F[i][j] = 0;
        for (k = 0; k < _PB_NM; ++k)
          F[i][j] += C[i][k] * D[k][j];
      }
    }

    /* G := E*F */
    #pragma omp target teams distribute thread_limit(256) private(j, k)
    for (i = 0; i < _PB_NI; i++) {
      for (j = 0; j < _PB_NL; j++) {
        G[i][j] = 0;
        for (k = 0; k < _PB_NJ; ++k)
          G[i][j] += E[i][k] * F[k][j];
      }
    }
  }
#pragma endscop
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;
  int nm = NM;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
  POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
  POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
  POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);

  /* Initialize array(s). */
  init_array (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_3mm (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(E),
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(F),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D),
	      POLYBENCH_ARRAY(G));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);

  return 0;
}
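The kernel's net effect is G = (A*B)*(C*D), computed as three dense products. A plain sequential reference of the same dataflow, useful for checking the offloaded result (hypothetical small fixed size; the benchmark's POLYBENCH scaffolding is intentionally omitted):

/* Sequential reference for the 3mm dataflow: E = A*B, F = C*D, G = E*F. */
#include <stdio.h>
#define N 4

static void mm(double A[N][N], double B[N][N], double C[N][N])
{
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            C[i][j] = 0.0;
            for (int k = 0; k < N; k++)
                C[i][j] += A[i][k] * B[k][j];
        }
}

int main(void)
{
    double A[N][N], B[N][N], C[N][N], D[N][N], E[N][N], F[N][N], G[N][N];
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            A[i][j] = i * j / (double) N;        /* any deterministic fill */
            B[i][j] = i * (j + 1) / (double) N;
            C[i][j] = i * (j + 3) / (double) N;
            D[i][j] = i * (j + 2) / (double) N;
        }
    mm(A, B, E);  /* E := A*B */
    mm(C, D, F);  /* F := C*D */
    mm(E, F, G);  /* G := E*F */
    printf("G[1][1] = %f\n", G[1][1]);
    return 0;
}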
ordered_processing.h
#ifndef ORDERED_PROCESSING_H_
#define ORDERED_PROCESSING_H_

#include <algorithm>
#include <limits>
#include <vector>

#include "graph.h"
#include "eager_priority_queue.h"

using namespace std;

const size_t kMaxBin = std::numeric_limits<size_t>::max()/2;

template <typename PriorityT_>
struct updatePriorityMin {
  void operator()(EagerPriorityQueue<PriorityT_>* pq,
                  vector<vector<NodeID> >& local_bins,
                  NodeID dst, PriorityT_ old_val, PriorityT_ new_val) {
    if (new_val < old_val) {
      bool changed_dist = true;
      while (!compare_and_swap(pq->priorities_[dst], old_val, new_val)) {
        old_val = pq->priorities_[dst];
        if (old_val <= new_val) {
          changed_dist = false;
          break;
        }
      }
      if (changed_dist) {
        // assume the priority is mapped to a bin using delta
        size_t dest_bin;
        if (pq->delta_ != 1)
          dest_bin = new_val/pq->delta_;
        else
          dest_bin = new_val;
        if (dest_bin >= local_bins.size()) {
          local_bins.resize(dest_bin+1);
        }
        local_bins[dest_bin].push_back(dst);
      }
    }
  }
};

template<class Priority, class EdgeApplyFunc, class WhileCond>
void OrderedProcessingOperatorNoMerge(EagerPriorityQueue<Priority>* pq,
                                      const WGraph &g,
                                      WhileCond while_cond,
                                      EdgeApplyFunc edge_apply,
                                      NodeID optional_source_node) {
  pvector<NodeID> frontier(g.num_edges_directed());
  // two element arrays for double buffering curr=iter&1, next=(iter+1)&1
  //size_t shared_indexes[2] = {0, kMaxBin};
  //size_t frontier_tails[2] = {1, 0};
  pq->init_indexes_tails();

  //optional source node
  frontier[0] = optional_source_node;
  // int round = 0;

  #pragma omp parallel
  {
    vector<vector<NodeID> > local_bins(0);
    size_t iter = 0;
    while (while_cond()) {
      //TODO: refactor to use user supplied
      // while (user_supplied_condition())
      // size_t &curr_bin_index = shared_indexes[iter&1];
      // size_t &next_bin_index = shared_indexes[(iter+1)&1];
      // size_t &curr_frontier_tail = frontier_tails[iter&1];
      // size_t &next_frontier_tail = frontier_tails[(iter+1)&1];
      size_t &curr_bin_index = pq->shared_indexes[iter&1];
      size_t &next_bin_index = pq->shared_indexes[(iter+1)&1];
      size_t &curr_frontier_tail = pq->frontier_tails[iter&1];
      size_t &next_frontier_tail = pq->frontier_tails[(iter+1)&1];

      // Used for debugging
      // round++;
      // std::cout << " round: " << round << std::endl;
      // std::cout << " frontier size: " << curr_frontier_tail << std::endl;
      // for (size_t i=0; i < curr_frontier_tail; i++) {
      //   NodeID u = frontier[i];
      //   std::cout << u << " ";
      // }
      // std::cout << std::endl;

      #pragma omp for nowait schedule(dynamic, 64)
      for (size_t i=0; i < curr_frontier_tail; i++) {
        NodeID u = frontier[i];
        //TODO: need to refactor to use user supplied filtering on the source node
        //if (src_filter(u)) {
        //hard code this into the library
        if (pq->priorities_[u] >= pq->delta_*pq->get_current_priority()) {
          for (WNode wn : g.out_neigh(u)) {
            edge_apply(local_bins, u, wn.v, wn.w);
          }
        } //end of if statement
      } //going through current frontier for end

      //searching for the next priority
      for (size_t i=pq->get_current_priority(); i < local_bins.size(); i++) {
        if (!local_bins[i].empty()) {
          #pragma omp critical
          next_bin_index = min(next_bin_index, i);
          break;
        }
      }

      #pragma omp barrier
      #pragma omp single nowait
      {
        //t.Stop();
        //PrintStep(curr_bin_index, t.Millisecs(), curr_frontier_tail);
        //t.Start();
        curr_bin_index = kMaxBin;
        curr_frontier_tail = 0;
        // need to make sure we increment it from only one thread
        pq->increment_iter();
      }
      if (next_bin_index < local_bins.size()) {
        size_t copy_start = fetch_and_add(next_frontier_tail,
                                          local_bins[next_bin_index].size());
        copy(local_bins[next_bin_index].begin(),
             local_bins[next_bin_index].end(),
             frontier.data() + copy_start);
        local_bins[next_bin_index].resize(0);
      }
      iter++;
      #pragma omp barrier
    }
    //#pragma omp single
    //cout << "order processing took " << iter << " iterations" << endl;
  } //end of pragma omp parallel
}

template<class Priority, class WhileCond, class EdgeApplyFunc>
void OrderedProcessingOperatorWithMerge(EagerPriorityQueue<Priority>* pq,
                                        const WGraph &g,
                                        WhileCond while_cond,
                                        EdgeApplyFunc edge_apply,
                                        int bin_size_threshold = 1000,
                                        NodeID optional_source_node = -1) {
  pvector<NodeID> frontier(g.num_edges_directed());
  // two element arrays for double buffering curr=iter&1, next=(iter+1)&1
  //size_t shared_indexes[2] = {0, kMaxBin};
  //size_t frontier_tails[2] = {1, 0};
  pq->init_indexes_tails();

  //optional source node
  frontier[0] = optional_source_node;

  #pragma omp parallel
  {
    vector<vector<NodeID> > local_bins(0);
    size_t iter = 0;
    while (while_cond()) {
      //TODO: refactor to use user supplied
      // while (user_supplied_condition())
      // size_t &curr_bin_index = shared_indexes[iter&1];
      // size_t &next_bin_index = shared_indexes[(iter+1)&1];
      // size_t &curr_frontier_tail = frontier_tails[iter&1];
      // size_t &next_frontier_tail = frontier_tails[(iter+1)&1];
      size_t &curr_bin_index = pq->shared_indexes[iter&1];
      size_t &next_bin_index = pq->shared_indexes[(iter+1)&1];
      size_t &curr_frontier_tail = pq->frontier_tails[iter&1];
      size_t &next_frontier_tail = pq->frontier_tails[(iter+1)&1];

      #pragma omp for nowait schedule(dynamic, 64)
      for (size_t i=0; i < curr_frontier_tail; i++) {
        NodeID u = frontier[i];
        //TODO: need to refactor to use user supplied filtering on the source node
        //if (src_filter(u)) {
        if (pq->priorities_[u] >= pq->delta_*pq->get_current_priority()) {
          for (WNode wn : g.out_neigh(u)) {
            edge_apply(local_bins, u, wn.v, wn.w);
          }
        } //end of if statement
      } //going through current frontier for end

      // merge: keep draining the current bin locally while it stays small
      while (local_bins.size() > 0 &&
             curr_bin_index < local_bins.size() &&
             !local_bins[curr_bin_index].empty()) {
        size_t cur_bin_size = local_bins[curr_bin_index].size();
        if (cur_bin_size > (size_t) bin_size_threshold)
          break;
        vector<NodeID> cur_bin_copy = local_bins[curr_bin_index];
        local_bins[curr_bin_index].resize(0);
        for (size_t i=0; i < cur_bin_size; i++) {
          NodeID u = cur_bin_copy[i];
          //if (src_filter(u)) {
          if (pq->priorities_[u] >= pq->delta_*pq->get_current_priority()) {
            for (WNode wn : g.out_neigh(u)) {
              edge_apply(local_bins, u, wn.v, wn.w);
            }
          }
        }
      }

      //searching for the next priority
      for (size_t i=pq->get_current_priority(); i < local_bins.size(); i++) {
        if (!local_bins[i].empty()) {
          #pragma omp critical
          next_bin_index = min(next_bin_index, i);
          break;
        }
      }

      #pragma omp barrier
      #pragma omp single nowait
      {
        //t.Stop();
        //PrintStep(curr_bin_index, t.Millisecs(), curr_frontier_tail);
        //t.Start();
        curr_bin_index = kMaxBin;
        curr_frontier_tail = 0;
        // need to make sure we increment it from only one thread
        pq->increment_iter();
      }
      if (next_bin_index < local_bins.size()) {
        size_t copy_start = fetch_and_add(next_frontier_tail,
                                          local_bins[next_bin_index].size());
        copy(local_bins[next_bin_index].begin(),
             local_bins[next_bin_index].end(),
             frontier.data() + copy_start);
        local_bins[next_bin_index].resize(0);
      }
      iter++;
      #pragma omp barrier
    }
    //#pragma omp single
    //cout << "took " << iter << " iterations" << endl;
  } //end of pragma omp parallel
}

#endif // ORDERED_PROCESSING_H_
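Both operators assume priorities are bucketed by delta: a node whose priority drops to p goes into local bin p/delta, and the threads then agree on the smallest non-empty bin for the next round. A minimal sketch of that bucketing rule in isolation (hypothetical stand-alone types; the real code routes this through EagerPriorityQueue and runs it concurrently):

// Minimal model of the bin mapping used by updatePriorityMin above:
// dest_bin = new_priority / delta, growing the bin vector on demand.
#include <cstdint>
#include <iostream>
#include <vector>

using ModelNodeID = int32_t;

static void push_to_bin(std::vector<std::vector<ModelNodeID>>& bins,
                        ModelNodeID dst, int64_t new_priority, int64_t delta)
{
  size_t dest_bin = (delta != 1) ? (size_t)(new_priority / delta)
                                 : (size_t)new_priority;
  if (dest_bin >= bins.size())
    bins.resize(dest_bin + 1);
  bins[dest_bin].push_back(dst);
}

int main()
{
  std::vector<std::vector<ModelNodeID>> bins;
  push_to_bin(bins, 7, 25, 10);  // priority 25, delta 10 -> bin 2
  push_to_bin(bins, 3, 4, 10);   // priority 4 -> bin 0
  std::cout << bins[2][0] << " " << bins[0][0] << "\n"; // prints "7 3"
  return 0;
}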
dger_2_save.c
#define max(a,b) (((a) < (b))? (b) : (a)) #define min(a,b) (((a) < (b))? (a) : (b)) #include <omp.h> #define _NB_1 1 void dger(const int M,const int N,const double alpha,const double* X,const int incX,const double* Y,const int incY,double* A,const int lda) { int i; int j; int j_bk_1; int j_bk_2; int i_bk_3; double _X_cp_0; double _Y_cp_0; double _Y_cp_1; double _Y_cp_2; double _Y_cp_3; omp_set_num_threads(16); #pragma omp parallel { /*@;BEGIN(nest1_group3=Nest)@*/#pragma omp for private(i,j,j_bk_1,j_bk_2,i_bk_3,_X_cp_0,_Y_cp_0,_Y_cp_1,_Y_cp_2,_Y_cp_3) for (j_bk_1=0; j_bk_1<N; j_bk_1+=_NB_1) { /*@;BEGIN(nest1_group2=Nest)@*/for (j_bk_2=0; j_bk_2<-31+min(_NB_1,N-j_bk_1); j_bk_2+=32) { /*@;BEGIN(nest2_group2=Nest)@*/for (i_bk_3=0; i_bk_3<-31+M; i_bk_3+=32) { /*@;BEGIN(nest1=Nest)@*/for (j=0; j<32; j+=4) { _Y_cp_0 = Y[j_bk_1*incY+(j_bk_2*incY+j*incY)]; _Y_cp_1 = Y[j_bk_1*incY+(j_bk_2*incY+(incY+j*incY))]; _Y_cp_2 = Y[j_bk_1*incY+(j_bk_2*incY+(2*incY+j*incY))]; _Y_cp_3 = Y[j_bk_1*incY+(j_bk_2*incY+(3*incY+j*incY))]; /*@;BEGIN(nest2=Nest)@*/for (i=0; i<32; i+=4) { _X_cp_0 = X[i_bk_3+i]; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))]+_X_cp_0*_Y_cp_0; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(lda+j*lda))))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(lda+j*lda))))]+_X_cp_0*_Y_cp_1; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(2*lda+j*lda))))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(2*lda+j*lda))))]+_X_cp_0*_Y_cp_2; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(3*lda+j*lda))))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(3*lda+j*lda))))]+_X_cp_0*_Y_cp_3; _X_cp_0 = X[i_bk_3+(1+i)]; A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i))))] = A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i))))]+_X_cp_0*_Y_cp_0; A[j*lda+(lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i)))))] = A[j*lda+(lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i)))))]+_X_cp_0*_Y_cp_1; A[j*lda+(2*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i)))))] = A[j*lda+(2*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i)))))]+_X_cp_0*_Y_cp_2; A[j*lda+(3*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i)))))] = A[j*lda+(3*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i)))))]+_X_cp_0*_Y_cp_3; _X_cp_0 = X[i_bk_3+(2+i)]; A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i))))] = A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i))))]+_X_cp_0*_Y_cp_0; A[j*lda+(lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i)))))] = A[j*lda+(lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i)))))]+_X_cp_0*_Y_cp_1; A[j*lda+(2*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i)))))] = A[j*lda+(2*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i)))))]+_X_cp_0*_Y_cp_2; A[j*lda+(3*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i)))))] = A[j*lda+(3*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i)))))]+_X_cp_0*_Y_cp_3; _X_cp_0 = X[i_bk_3+(3+i)]; A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i))))] = A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i))))]+_X_cp_0*_Y_cp_0; A[j*lda+(lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i)))))] = A[j*lda+(lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i)))))]+_X_cp_0*_Y_cp_1; A[j*lda+(2*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i)))))] = A[j*lda+(2*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i)))))]+_X_cp_0*_Y_cp_2; A[j*lda+(3*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i)))))] = A[j*lda+(3*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i)))))]+_X_cp_0*_Y_cp_3; } } } if (i_bk_3<M) { for (j=0; j<32; j+=4) { _Y_cp_0 = Y[j_bk_1*incY+(j_bk_2*incY+j*incY)]; _Y_cp_1 = Y[j_bk_1*incY+(j_bk_2*incY+(incY+j*incY))]; _Y_cp_2 = Y[j_bk_1*incY+(j_bk_2*incY+(2*incY+j*incY))]; _Y_cp_3 = Y[j_bk_1*incY+(j_bk_2*incY+(3*incY+j*incY))]; for (i=0; i<M-i_bk_3; i+=1) { _X_cp_0 = X[i_bk_3+i]; 
A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))]+_X_cp_0*_Y_cp_0; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(lda+j*lda))))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(lda+j*lda))))]+_X_cp_0*_Y_cp_1; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(2*lda+j*lda))))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(2*lda+j*lda))))]+_X_cp_0*_Y_cp_2; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(3*lda+j*lda))))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+(3*lda+j*lda))))]+_X_cp_0*_Y_cp_3; } } } } if (j_bk_2<min(_NB_1,N-j_bk_1)) { for (i_bk_3=0; i_bk_3<-31+M; i_bk_3+=32) { for (j=0; j<min(_NB_1-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { _Y_cp_0 = Y[j_bk_1*incY+(j_bk_2*incY+j*incY)]; for (i=0; i<32; i+=4) { _X_cp_0 = X[i_bk_3+i]; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))]+_X_cp_0*_Y_cp_0; _X_cp_0 = X[i_bk_3+(1+i)]; A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i))))] = A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(1+i))))]+_X_cp_0*_Y_cp_0; _X_cp_0 = X[i_bk_3+(2+i)]; A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i))))] = A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(2+i))))]+_X_cp_0*_Y_cp_0; _X_cp_0 = X[i_bk_3+(3+i)]; A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i))))] = A[j*lda+(j_bk_2*lda+(j_bk_1*lda+(i_bk_3+(3+i))))]+_X_cp_0*_Y_cp_0; } } } if (i_bk_3<M) { for (j=0; j<min(_NB_1-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1) { _Y_cp_0 = Y[j_bk_1*incY+(j_bk_2*incY+j*incY)]; for (i=0; i<M-i_bk_3; i+=1) { _X_cp_0 = X[i_bk_3+i]; A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))] = A[i+(i_bk_3+(j_bk_1*lda+(j_bk_2*lda+j*lda)))]+_X_cp_0*_Y_cp_0; } } } } } } }
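For comparison, the reference rank-1 update is A := alpha*x*y^T + A over a column-major A with leading dimension lda. Note two assumptions baked into the tuned kernel above: it never multiplies by alpha (effectively alpha == 1) and it indexes X without incX (effectively unit stride); only Y honors its stride. The straightforward loop below includes both:

/* Unblocked reference DGER: A := alpha * x * y^T + A, column-major with
 * leading dimension lda.  Useful for checking the tiled kernel above. */
static void dger_ref(int M, int N, double alpha,
                     const double *X, int incX,
                     const double *Y, int incY,
                     double *A, int lda)
{
    for (int j = 0; j < N; j++) {
        const double tmp = alpha * Y[j * incY]; /* hoist the column scalar */
        for (int i = 0; i < M; i++)
            A[i + j * lda] += X[i * incX] * tmp;
    }
}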
GB_unop__minv_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__minv_uint32_uint32
// op(A') function: GB_unop_tran__minv_uint32_uint32

// C type:   uint32_t
// A type:   uint32_t
// cast:     uint32_t cij = aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    uint32_t aij = Ax [pA] ;                    \
    /* Cx [pC] = op (cast (aij)) */             \
    uint32_t z = aij ;                          \
    Cx [pC] = GB_IMINV_UNSIGNED (z, 32) ;       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__minv_uint32_uint32
(
    uint32_t *Cx,           // Cx and Ax may be aliased
    const uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        uint32_t z = aij ;
        Cx [p] = GB_IMINV_UNSIGNED (z, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__minv_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
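GB_IMINV_UNSIGNED computes an integer multiplicative inverse, 1/x in unsigned arithmetic, with division by zero given a defined result. A hedged stand-alone model follows; the assumption that 1/0 maps to the all-ones value reflects GraphBLAS's documented treatment of unsigned division by zero, but the authoritative definition lives in the library's own headers, not here:

/* Hedged model of an unsigned integer multiplicative inverse in the spirit
 * of GB_IMINV_UNSIGNED (x, 32).  Assumption: 1/0 yields the all-ones value. */
#include <stdint.h>
#include <stdio.h>

static uint32_t iminv_u32(uint32_t x)
{
    return (x == 0) ? UINT32_MAX : (uint32_t)(1u / x);
}

int main(void)
{
    /* only x == 1 has a true inverse; everything else truncates to 0 */
    printf("%u %u %u\n", iminv_u32(0), iminv_u32(1), iminv_u32(7)); /* 4294967295 1 0 */
    return 0;
}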
opencl_sxc_fmt_plug.c
/*
 * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sxc);
#else

#include <string.h>
#include "sha.h"
#include <openssl/blowfish.h>
#include <openssl/aes.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "options.h"
#include "common-opencl.h"

#define FORMAT_LABEL		"sxc-opencl"
#define FORMAT_NAME		"StarOffice .sxc"
#define ALGORITHM_NAME		"PBKDF2-SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1
#define BINARY_SIZE		20
#define PLAINTEXT_LENGTH	64
#define SALT_SIZE		sizeof(sxc_cpu_salt)
#define BINARY_ALIGN		MEM_ALIGN_WORD
#define SALT_ALIGN		MEM_ALIGN_WORD

#define uint8_t			unsigned char
#define uint16_t		unsigned short
#define uint32_t		unsigned int

typedef struct {
	uint32_t length;
	uint8_t v[20];	/* hash of password */
} sxc_password;

typedef struct {
	uint32_t v[16/4];
} sxc_hash;

typedef struct {
	uint8_t length;
	uint8_t salt[32];
	uint32_t iterations;
	uint32_t outlen;
} sxc_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];

typedef struct {
	int cipher_type;
	int checksum_type;
	int iterations;
	int key_size;
	int iv_length;
	int salt_length;
	int original_length;
	int length;
	unsigned char iv[16];
	unsigned char salt[32];
	unsigned char content[1024];
} sxc_cpu_salt;

static sxc_cpu_salt *cur_salt;

static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, {NULL} }; static cl_int cl_error; static sxc_password *inbuffer; static sxc_hash *outbuffer; static sxc_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize; #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define OCL_CONFIG "sxc" #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return get_platform_vendor_id(platform_id) == DEV_INTEL ? 
8 : 1; else return 64; } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(sxc_password) * gws; outsize = sizeof(sxc_hash) * gws; settingsize = sizeof(sxc_salt); inbuffer = mem_calloc(insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(sizeof(*saved_key) * gws); crypt_out = mem_calloc(sizeof(*crypt_out) * gws); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(saved_key); MEM_FREE(crypt_out); } static void done(void) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); } static void init(struct fmt_main *self) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(sxc_password), 0); // Auto tune execution from shared/included code. 
autotune_run(self, 1, 0, 1000); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$sxc$", 5)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtok(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iterations */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (strlen(p) != BINARY_SIZE * 2) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res > 16) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res > 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* original length */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res > 1024) goto err; if ((p = strtok(NULL, "*")) == NULL) /* content */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static sxc_cpu_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtok(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtok(NULL, "*"); cs.checksum_type = atoi(p); p = strtok(NULL, "*"); cs.iterations = atoi(p); p = strtok(NULL, "*"); cs.key_size = atoi(p); strtok(NULL, "*"); /* skip checksum field */ p = strtok(NULL, "*"); cs.iv_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.salt_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.original_length = atoi(p); p = strtok(NULL, "*"); cs.length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtok(ctcopy, "*"); strtok(NULL, "*"); strtok(NULL, "*"); strtok(NULL, "*"); p = strtok(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static void set_salt(void *salt) { cur_salt = (sxc_cpu_salt*)salt; memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length); currentsalt.length = cur_salt->salt_length; currentsalt.iterations = cur_salt->iterations; currentsalt.outlen = cur_salt->key_size; 
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } #undef set_key static void set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index; global_work_size = (count + local_work_size - 1) / local_work_size * local_work_size; #ifdef _OPENMP #pragma omp parallel for #endif for(index = 0; index < count; index++) { unsigned char hash[20]; SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index])); SHA1_Final((unsigned char *)hash, &ctx); memcpy(inbuffer[index].v, hash, 20); inbuffer[index].length = 20; } /// Copy data to gpu HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); #ifdef _OPENMP #pragma omp parallel for #endif for(index = 0; index < count; index++) { BF_KEY bf_key; SHA_CTX ctx; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, (const unsigned char*)outbuffer[index].v); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { sxc_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } #endif struct fmt_main fmt_opencl_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif sxc_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif 
fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
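/*
 * Reader's note (derived from valid() and get_salt() above, not from the
 * original source): the ciphertexts accepted by this format are
 * '*'-separated fields of the form
 *
 *   $sxc$*cipher_type*checksum_type*iterations*key_size*checksum_hex
 *        *iv_length*iv_hex*salt_length*salt_hex*original_length*length
 *        *content_hex
 *
 * where cipher_type and checksum_type must be 0 or 1, key_size must be 16
 * or 32, and iv_hex, salt_hex, and content_hex must be hex strings whose
 * lengths match the preceding byte counts.
 */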
GB_binop__bshift_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):      GB_AaddB__bshift_int32
// A.*B function (eWiseMult):    GB_AemultB__bshift_int32
// A*D function (colscale):      (none)
// D*A function (rowscale):      (none)
// C+=B function (dense accum):  GB_Cdense_accumB__bshift_int32
// C+=b function (dense accum):  GB_Cdense_accumb__bshift_int32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bshift_int32
// C=scalar+B     GB_bind1st__bshift_int32
// C=scalar+B'    GB_bind1st_tran__bshift_int32
// C=A+scalar     GB_bind2nd__bshift_int32
// C=A'+scalar    GB_bind2nd_tran__bshift_int32

// C type:   int32_t
// A type:   int32_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int32 (aij, bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_bitshift_int32 (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_INT32 || GxB_NO_BSHIFT_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bshift_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bshift_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bshift_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__bshift_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bshift_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bshift_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = GB_bitshift_int32 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bshift_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        Cx [p] = GB_bitshift_int32 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = Ax [pA] ;                      \
    Cx [pC] = GB_bitshift_int32 (x, aij) ;      \
}

GrB_Info GB_bind1st_tran__bshift_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = Ax [pA] ;                     \
    Cx [pC] = GB_bitshift_int32 (aij, y) ;      \
}

GrB_Info GB_bind2nd_tran__bshift_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
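/*
 * Illustrative sketch (not part of the generated file): GB_bitshift_int32
 * itself is defined elsewhere in the library.  The stand-alone function
 * below is an assumption about its behaviour, based on the documented
 * GxB_BSHIFT semantics: shift left for a positive count, shift right for a
 * negative count, and saturate once the count reaches the word width.  The
 * stdint types are assumed available, as in the surrounding file.
 */
static inline int32_t example_bitshift_int32 (int32_t x, int8_t k)
{
    if (k == 0) return (x) ;
    if (k >= 32) return (0) ;                   // shifted past the width
    if (k <= -32) return ((x < 0) ? -1 : 0) ;   // sign fill past the width
    if (k > 0) return ((int32_t) (((uint32_t) x) << k)) ;   // left shift
    return (x >> (-k)) ;    // right shift (assumed arithmetic for int32_t)
}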
GB_binop__rminus_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_01__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint64) // A*D function (colscale): GB (_AxD__rminus_uint64) // D*A function (rowscale): GB (_DxB__rminus_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint64) // C=scalar+B GB (_bind1st__rminus_uint64) // C=scalar+B' GB (_bind1st_tran__rminus_uint64) // C=A+scalar GB (_bind2nd__rminus_uint64) // C=A'+scalar GB (_bind2nd_tran__rminus_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT64 || GxB_NO_RMINUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include 
"GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rminus_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, 
int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
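/*
 * Worked example (illustrative, not generated code): RMINUS is MINUS with
 * its operands flipped, z = y - x.  With the scalar 3 bound as the first
 * input and Bx = {10, 7}, GB (_bind1st__rminus_uint64) computes
 * Cx [p] = Bx [p] - 3 = {7, 4}.  With the same scalar bound as the second
 * input and Ax = {10, 7}, GB (_bind2nd__rminus_uint64) computes
 * Cx [p] = 3 - Ax [p], which wraps modulo 2^64 whenever Ax [p] > 3, since
 * the type is uint64_t.
 */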
omp_single_private.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int myit = 0; #pragma omp threadprivate(myit) int myresult = 0; #pragma omp threadprivate(myresult) int test_omp_single_private() { int nr_threads_in_single; int result; int nr_iterations; int i; myit = 0; nr_threads_in_single = 0; nr_iterations = 0; result = 0; #pragma omp parallel private(i) { myresult = 0; myit = 0; for (i = 0; i < LOOPCOUNT; i++) { #pragma omp single private(nr_threads_in_single) nowait { nr_threads_in_single = 0; #pragma omp flush nr_threads_in_single++; #pragma omp flush myit++; myresult = myresult + nr_threads_in_single; } } #pragma omp critical { result += nr_threads_in_single; nr_iterations += myit; } } return ((result == 0) && (nr_iterations == LOOPCOUNT)); } /* end of check_single private */ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_single_private()) { num_failed++; } } return num_failed; }
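/*
 * Reader's note (analysis of the test above, not part of the original
 * source): nr_threads_in_single is private inside the single construct, so
 * the increment there never reaches the shared copy that the critical
 * section reads -- result must remain 0.  Each of the LOOPCOUNT single
 * regions is executed by exactly one thread, so the threadprivate myit
 * counters sum to exactly LOOPCOUNT across the team, which is what the
 * return expression checks.
 */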
a12.c
#include <stdio.h>

#define N 100000000
#define MAX 4

int a[N], b[N], ind[N];
long long s = 0;

int main(void)
{
	int i;

	/* initialization, not in parallel */
	for (i = 0; i < N; i++) {
		a[i] = 1;
		b[i] = 2;
		ind[i] = i % MAX;
	}

	/* NOTE: many iterations update the same b[ind[i]] concurrently, so
	 * this loop has a data race; a race-free variant is sketched below. */
	#pragma omp parallel for schedule(static,1)
	for (i = 0; i < N; i++)
		b[ind[i]] += a[i];

	for (i = 0; i < MAX; i++) {
		printf("Value %d of b: %d\n", i, b[i]);
		s += b[i];
	}
	printf("Total sum of b: %lld\n", s);
	return 0;
}
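/*
 * A minimal race-free variant of the parallel update loop above
 * (illustrative; update_atomic is a hypothetical helper, not part of the
 * original exercise).  Because ind[] maps many iterations onto the same
 * element of b[], the update must be made atomic:
 */
void update_atomic(int *b, const int *a, const int *ind, int n)
{
	int i;
	#pragma omp parallel for schedule(static)
	for (i = 0; i < n; i++) {
		#pragma omp atomic
		b[ind[i]] += a[i];	/* one indivisible read-modify-write */
	}
}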
indicator_kriging.h
/*
   Copyright 2009 HPGL Team
   This file is part of HPGL (High Performance Geostatistics Library).
   HPGL is free software: you can redistribute it and/or modify it under the terms of the BSD License.
   You should have received a copy of the BSD License along with HPGL.
*/

#ifndef __GSALGO_INDICATOR_KRIGING_COMMAND_H__56F6BFCF_ACC9_4F63_A197_57316908E436__
#define __GSALGO_INDICATOR_KRIGING_COMMAND_H__56F6BFCF_ACC9_4F63_A197_57316908E436__

#include <ik_params.h>
#include <sugarbox_grid.h>
#include "progress_reporter.h"
#include "cdf_utils.h"
#include "pretty_printer.h"
#include "my_kriging_weights.h"
#include "precalculated_covariance.h"
#include "kriging_interpolation.h"
#include "neighbourhood_lookup.h"
#include "is_informed_predicate.h"
#include "cov_model.h"

namespace hpgl
{
	typedef std::vector<indicator_probability_t> marginal_probs_t;

	/*template<
		typename grid_t,
		typename data_t,
		typename means_t,
		typename weight_calculator_t>*/

	namespace detail
	{
		template<typename cov_t>
		void create_precalculated_cov_models(const ik_params_t & params, std::vector<cov_t> & result)
		{
			result.resize(params.m_category_count);
			for (int i = 0; i < params.m_category_count; ++i)
			{
				result[i].init(cov_model_t(params.m_cov_params[i]), params.m_radiuses[i]);
			}
		}

		template<typename nl_t, typename prop_t>
		void add_defined_cells(std::vector<nl_t> & neighbour_lookups, const prop_t & prop)
		{
			for (int node = 0, end_node = prop.size(); node < end_node; ++node)
			{
				if (prop.is_informed(node))
				{
					for (int i = 0, end_i = neighbour_lookups.size(); i < end_i; ++i)
					{
						neighbour_lookups[i].add_node(node);
					}
				}
			}
		}
	}

	template<typename grid_t, typename marginal_probs_t>
	void do_indicator_kriging(
		const indicator_property_array_t & input_property,
		const grid_t & grid,
		const ik_params_t & params,
		const marginal_probs_t & mps,
		indicator_property_array_t & output_property)
	{
		using namespace hpgl::detail;
		print_algo_name("Indicator Kriging");
		print_params(params);
		progress_reporter_t report(grid.size());

		typedef precalculated_covariances_t cov_t;
		std::vector<cov_t> covariances;
		create_precalculated_cov_models(params, covariances);

		typedef indexed_neighbour_lookup_t<grid_t, cov_t> nl_t;
		std::vector<nl_t> nblookups;
		for (int i = 0; i < params.m_category_count; ++i)
		{
			nblookups.push_back(nl_t(&grid, &covariances[i], params.m_nb_params[i]));
		}
		add_defined_cells(nblookups, input_property);

		std::vector<indicator_array_adapter_t> ind_props;
		for (int i = 0; i < params.m_category_count; ++i)
		{
			ind_props.push_back(indicator_array_adapter_t(&input_property, i));
		}

		report.start();
		size_t size = input_property.size();
#pragma omp parallel
		{
#pragma omp for
			for (node_index_t node_idx = 0; node_idx < size; ++node_idx)
			{
				std::vector<indicator_probability_t> probs;
				for (int idx = 0; idx < params.m_category_count; ++idx)
				{
					indicator_probability_t prob;
					ki_result_t ki_result = kriging_interpolation(ind_props[idx],
						is_informed_predicate_t<indicator_property_array_t>(input_property),
						node_idx, covariances[idx], mps[idx], nblookups[idx],
						sk_weight_calculator_t(), prob);
					if (ki_result != KI_SUCCESS)
					{
						prob = mps[idx][node_idx];
					}
					probs.push_back(prob);
				}
				output_property.set_at(node_idx, most_probable_category(probs));
#pragma omp critical
				{
					report.next_lap();
				}
			}
		}
		report.stop();
		std::cout << "\nDone. Average speed: " << report.iterations_per_second() << " point/sec." << std::endl;
	}
}

#endif //__GSALGO_INDICATOR_KRIGING_COMMAND_H__56F6BFCF_ACC9_4F63_A197_57316908E436__
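/*
 * Reader's note (summary of do_indicator_kriging above, not from the
 * original header): the algorithm runs in two phases.  A serial setup
 * phase builds one precalculated covariance model, one neighbourhood
 * lookup, and one indicator adapter per category; the parallel phase then
 * kriges every node independently, once per category.  When
 * kriging_interpolation fails (ki_result != KI_SUCCESS), the marginal
 * probability mps[idx][node_idx] is used as a fallback, so every node of
 * output_property receives a category.  Only the progress reporter is
 * serialized, via the critical section.
 */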
GB_unop__identity_int64_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__(none)) // op(A') function: GB (_unop_tran__identity_int64_int64) // C type: int64_t // A type: int64_t // cast: int64_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info GB (_unop_apply__(none)) ( int64_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int64_int64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
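/*
 * Reader's note (assumption, not stated in the file): the apply kernel is
 * compiled out with #if 0 and listed as (none) above, presumably because
 * applying a same-type IDENTITY operator amounts to a plain copy that the
 * library can perform without a hard-coded kernel; only the transpose
 * variant needs generated code here.
 */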
GB_binop__max_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp64) // A*D function (colscale): GB (_AxD__max_fp64) // D*A function (rowscale): GB (_DxB__max_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__max_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__max_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp64) // C=scalar+B GB (_bind1st__max_fp64) // C=scalar+B' GB (_bind1st_tran__max_fp64) // C=A+scalar GB (_bind2nd__max_fp64) // C=A'+scalar GB (_bind2nd_tran__max_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 0 // BinaryOp: cij = fmax (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = fmax (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_FP64 || GxB_NO_MAX_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = 
(*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = fmax (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = fmax (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmax (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmax (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
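/*
 * Reader's note (not in the original file): MAX_FP64 is implemented with
 * C99 fmax rather than a bare comparison because fmax returns the numeric
 * operand when the other is NaN -- fmax(3.0, NAN) == 3.0 -- whereas
 * (x > y) ? x : y would let NaNs poison the result.  A minimal check of
 * that assumption:
 */
#include <math.h>
#include <stdio.h>

static void example_fmax_nan(void)
{
    // prints "3.000000 3.000000": the NaN is ignored on either side
    printf("%f %f\n", fmax(3.0, NAN), fmax(NAN, 3.0));
}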
support.c
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>
#include "support.h"

bool isEqualArray(int* arr1, int* arr2, int size)
{
	bool isResultCorrect = true;
	int i = 0;

	while ((isResultCorrect == true) && (i < size))
	{
		if (arr1[i] != arr2[i])
		{
			isResultCorrect = false;
		}
		i++;
	}
	return isResultCorrect;
}

void initializeArrays(int* arr1, int* arr2, int* arr3, int size,
                      int minimumNumber, int maximumNumber)
{
	int i;
	unsigned int baseSeed = (unsigned int) time(NULL);

	printf("Initializing the arrays...\n");
	/* rand() keeps hidden shared state and is not required to be
	 * thread-safe, so calling it from a parallel loop is a data race;
	 * give each thread its own seed and use rand_r() instead. */
	#pragma omp parallel
	{
		unsigned int seed = baseSeed ^ (unsigned int) omp_get_thread_num();
		#pragma omp for
		for (i = 0; i < size; i++)
		{
			arr1[i] = minimumNumber + (rand_r(&seed) % maximumNumber);
			arr2[i] = arr1[i];
			arr3[i] = arr1[i];
		}
	}
	printf("Complete.\n");
}

void copyArray(int* src, int* dst, int size)
{
	int i;
	#pragma omp parallel for
	for (i = 0; i < size; i++)
	{
		dst[i] = src[i];
	}
}
MICsearch.c
#include "MICsearch.h" // MIC search with KNC instructions and Adaptive Profile technique void mic_search_knc_ap_multiple_chunks (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned int query_sequences_count, unsigned long int Q, unsigned int * query_disp, unsigned long int vect_sequences_db_count, char ** chunk_b, unsigned int chunk_count, unsigned int * chunk_vect_sequences_db_count, unsigned short int ** chunk_n, unsigned int ** chunk_b_disp, unsigned long int * chunk_vD, char * submat, int open_gap, int extend_gap, int num_mics, int mic_threads, int * scores, double * workTime, unsigned short int query_length_threshold){ long int i=0; double tick; char *queryProfiles, *a, * b; unsigned short int * m, *n, sequences_db_max_length, query_sequences_max_length; unsigned int * a_disp, * b_disp = NULL, offload_max_vect_sequences_db_count=0, qp_count, sp_count; unsigned int c; unsigned long int offload_max_vD=0, * chunk_accum_vect_sequences_db_count; a = query_sequences; b = chunk_b[0]; m = query_sequences_lengths; n = chunk_n[0]; a_disp = query_disp; b_disp = chunk_b_disp[0]; query_sequences_max_length = query_sequences_lengths[query_sequences_count-1]; sequences_db_max_length = chunk_n[chunk_count-1][chunk_vect_sequences_db_count[chunk_count-1]-1]; // calculate maximum chunk size for (i=0; i<chunk_count ; i++) offload_max_vD = (offload_max_vD > chunk_vD[i] ? offload_max_vD : chunk_vD[i]); // calculate maximum chunk sequences count for (i=0; i<chunk_count ; i++) offload_max_vect_sequences_db_count = (offload_max_vect_sequences_db_count > chunk_vect_sequences_db_count[i] ? offload_max_vect_sequences_db_count : chunk_vect_sequences_db_count[i]); // build query profile's queryProfiles = (char *)_mm_malloc(Q*BLOSUM_COLS*sizeof(char), 64); for (i=0; i<Q ; i++) memcpy(queryProfiles+i*BLOSUM_COLS,submat+a[i]*BLOSUM_COLS,BLOSUM_COLS*sizeof(char)); // calculate number of query sequences that are processed with query and score profile i = 0; while ((i < query_sequences_count) && (query_sequences_lengths[i] < query_length_threshold)) i++; qp_count = i; sp_count = query_sequences_count-qp_count; // Allocate memory for CPU buffers chunk_accum_vect_sequences_db_count = (unsigned long int *)_mm_malloc(chunk_count*sizeof(unsigned long int), 32); chunk_accum_vect_sequences_db_count[0] = 0; for (i=1; i<chunk_count ; i++) chunk_accum_vect_sequences_db_count[i] = chunk_accum_vect_sequences_db_count[i-1] + chunk_vect_sequences_db_count[i-1]; tick = dwalltime(); #pragma omp parallel default(none) shared(submat, a,queryProfiles,m,a_disp,query_sequences_count,b,n,b_disp,vect_sequences_db_count,scores, open_gap, extend_gap, qp_count,sp_count, chunk_b, chunk_n, chunk_b_disp, chunk_vD, query_sequences_max_length, sequences_db_max_length, Q, chunk_vect_sequences_db_count, chunk_count, offload_max_vD, offload_max_vect_sequences_db_count, query_length_threshold, chunk_accum_vect_sequences_db_count, mic_threads) num_threads(num_mics) { __declspec(align(64)) __m512i *row_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *maxCol_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *maxRow_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *lastCol_ptrs[MIC_MAX_NUM_THREADS]={NULL}; __declspec(align(64)) char * scoreProfile_ptrs[MIC_MAX_NUM_THREADS]={NULL}; unsigned long int offload_vD, scores_offset; unsigned int i, mic_chunks=0, * ptr_chunk_b_disp, offload_vect_sequences_db_count; int mic_no, * mic_scores; unsigned short int * ptr_chunk_n; char * ptr_chunk_b; mic_no = omp_get_thread_num(); mic_scores = (int*) 
_mm_malloc(query_sequences_count*(offload_max_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH)*sizeof(int), 64); // pre-allocate buffers and transfer common thread data to corresponding MIC #pragma offload_transfer target(mic:mic_no) in(submat: length(BLOSUM_ELEMS) ALLOC) in(queryProfiles: length(Q*BLOSUM_COLS) ALLOC) \ in(a: length(Q) ALLOC) in(m:length(query_sequences_count) ALLOC) in(a_disp: length(query_sequences_count) ALLOC) \ nocopy(b:length(offload_max_vD) ALLOC) nocopy(n:length(offload_max_vect_sequences_db_count) ALLOC) nocopy(b_disp: length(offload_max_vect_sequences_db_count) ALLOC) \ nocopy(mic_scores: length(query_sequences_count*offload_max_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH) ALLOC) \ nocopy(row_ptrs, maxCol_ptrs, maxRow_ptrs, lastCol_ptrs, scoreProfile_ptrs: ALLOC) // distribute database chunk between MICs using dynamic scheduling #pragma omp for schedule(dynamic) nowait for (c=0; c < chunk_count ; c++) { offload_vD = chunk_vD[c]; offload_vect_sequences_db_count = chunk_vect_sequences_db_count[c]; ptr_chunk_b = chunk_b[c]; ptr_chunk_n = chunk_n[c]; ptr_chunk_b_disp = chunk_b_disp[c]; scores_offset = chunk_accum_vect_sequences_db_count[c]; // process database chunk in MIC #pragma offload target(mic:mic_no) in(ptr_chunk_b[0:offload_vD] : into(b[0:offload_vD]) REUSE) \ in(ptr_chunk_n[0:offload_vect_sequences_db_count] : into(n[0:offload_vect_sequences_db_count]) REUSE) \ in(ptr_chunk_b_disp[0:offload_vect_sequences_db_count] : into(b_disp[0:offload_vect_sequences_db_count]) REUSE) \ out(mic_scores: length(query_sequences_count*offload_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH) REUSE) \ in(a: length(0) REUSE) in(m: length(0) REUSE) in(a_disp: length(0) REUSE) in(submat: length(0) REUSE) in(queryProfiles: length(0) REUSE) \ nocopy(row_ptrs, maxCol_ptrs, maxRow_ptrs, lastCol_ptrs, scoreProfile_ptrs: REUSE) #pragma omp parallel shared(c, mic_chunks, offload_vect_sequences_db_count, query_sequences_count, open_gap, extend_gap, query_sequences_max_length, sequences_db_max_length) num_threads(mic_threads) { __m512i *row, *maxCol, *maxRow, *lastCol; int * ptr_scores; char * ptr_a, * ptr_b, *ptr_b_block, * scoreProfile, * queryProfile, * ptr_scoreProfile; __declspec(align(64)) __m512i vzero = _mm512_setzero_epi32(), score, previous, current, aux1, aux2, auxLastCol; __declspec(align(64)) __m512i vextend_gap = _mm512_set1_epi32(extend_gap), vopen_extend_gap = _mm512_set1_epi32(open_gap+extend_gap); __declspec(align(64)) __m512i v16 = _mm512_set1_epi32(16), submat_hi, submat_lo, b_values; __mmask16 mask; unsigned int tid, i, j, jj, k, disp_1, disp_2, disp_3, dim, nbb; unsigned long int t, s, q; tid = omp_get_thread_num(); // if this is the first offload, allocate auxiliary buffers if (mic_chunks == 0) { row_ptrs[tid] = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64); maxCol_ptrs[tid] = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64); maxRow_ptrs[tid] = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64); lastCol_ptrs[tid] = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64); if (query_sequences_max_length >= query_length_threshold) scoreProfile_ptrs[tid] = (char *) _mm_malloc(BLOSUM_ROWS_x_MIC_KNC_INT32_VECTOR_LENGTH*MIC_KNC_BLOCK_SIZE*sizeof(char),16); } row = row_ptrs[tid]; maxCol = maxCol_ptrs[tid]; maxRow = maxRow_ptrs[tid]; lastCol = lastCol_ptrs[tid]; scoreProfile = scoreProfile_ptrs[tid]; // calculate chunk alignments using query profile technique #pragma omp for 
schedule(dynamic) nowait for (t=0; t< qp_count*offload_vect_sequences_db_count; t++) { q = (qp_count-1) - (t % qp_count); s = (offload_vect_sequences_db_count-1) - (t / qp_count); queryProfile = queryProfiles + a_disp[q]*BLOSUM_COLS; ptr_b = b + b_disp[s]; ptr_scores = mic_scores + (q*offload_vect_sequences_db_count+s)*MIC_KNC_INT32_VECTOR_LENGTH; // init buffers #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32(); // set score to 0 score = _mm512_setzero_epi32(); // calculate number of blocks nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE); for (k=0; k < nbb; k++){ // calculate dim disp_1 = k*MIC_KNC_BLOCK_SIZE; dim = (MIC_KNC_BLOCK_SIZE < n[s]-disp_1 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_1); // get b block ptr_b_block = ptr_b + disp_1*MIC_KNC_INT32_VECTOR_LENGTH; // init buffers #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=1; i<dim+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<dim ; i++ ) row[i] = _mm512_setzero_epi32(); auxLastCol = _mm512_setzero_epi32(); for( i = 0; i < m[q]; i++){ // previous must start in 0 previous = _mm512_setzero_epi32(); // update row[0] with lastCol elements row[0] = lastCol[i]; // load submat values corresponding to current a residue disp_1 = i*BLOSUM_COLS; #if __MIC__ submat_lo = _mm512_extload_epi32(queryProfile+disp_1, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0); submat_hi = _mm512_extload_epi32(queryProfile+disp_1+16, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0); #endif // store maxRow in auxiliar var aux2 = maxRow[i]; #pragma unroll(MIC_KNC_UNROLL_COUNT) for( jj=1; jj < dim+1; jj++) { //calcuate the diagonal value #if __MIC__ b_values = _mm512_extload_epi32(ptr_b_block+(jj-1)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0); #endif mask = _mm512_cmpge_epi32_mask(b_values,v16); aux1 = _mm512_permutevar_epi32(b_values, submat_lo); aux1 = _mm512_mask_permutevar_epi32(aux1, mask, b_values, submat_hi); current = _mm512_add_epi32(row[jj-1], aux1); // calculate current max value current = _mm512_max_epi32(current, aux2); current = _mm512_max_epi32(current, maxCol[jj]); current = _mm512_max_epi32(current, vzero); // update maxRow and maxCol aux2 = _mm512_sub_epi32(aux2, vextend_gap); maxCol[jj] = _mm512_sub_epi32(maxCol[jj], vextend_gap); aux1 = _mm512_sub_epi32(current, vopen_extend_gap); aux2 = _mm512_max_epi32(aux2, aux1); maxCol[jj] = _mm512_max_epi32(maxCol[jj], aux1); // update row buffer row[jj-1] = previous; previous = current; // update max score score = _mm512_max_epi32(score,current); } // update lastCol lastCol[i] = auxLastCol; auxLastCol = current; // update maxRow maxRow[i] = aux2; } } // store max value _mm512_store_epi32(ptr_scores, score); } // calculate database chunk alignments with score profile technique #pragma omp for schedule(dynamic) nowait for (t=0; t< sp_count*offload_vect_sequences_db_count; t++) { q = qp_count + (sp_count-1) - (t % sp_count); s = (offload_vect_sequences_db_count-1) - (t / sp_count); ptr_a = a + a_disp[q]; ptr_b = b + b_disp[s]; ptr_scores = mic_scores + (q*offload_vect_sequences_db_count+s)*MIC_KNC_INT32_VECTOR_LENGTH; // init buffers #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = 
_mm512_setzero_epi32();
                // set score to 0
                score = _mm512_setzero_epi32();
                // calculate number of blocks
                nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE);
                for (k=0; k < nbb; k++){
                    // calculate dim
                    disp_2 = k*MIC_KNC_BLOCK_SIZE;
                    dim = (MIC_KNC_BLOCK_SIZE < n[s]-disp_2 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_2);
                    // init buffers
                    #pragma unroll(MIC_KNC_UNROLL_COUNT)
                    for (i=1; i<dim+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); // index 0 is not used
                    #pragma unroll(MIC_KNC_UNROLL_COUNT)
                    for (i=0; i<dim ; i++ ) row[i] = _mm512_setzero_epi32();
                    auxLastCol = _mm512_setzero_epi32();
                    // build score profile
                    disp_1 = dim*MIC_KNC_INT32_VECTOR_LENGTH;
                    for (i=0; i<dim ;i++ ) {
                        #if __MIC__
                        aux1 = _mm512_extload_epi32(ptr_b+(disp_2+i)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
                        #endif
                        disp_3 = i*MIC_KNC_INT32_VECTOR_LENGTH;
                        #pragma unroll(MIC_KNC_UNROLL_COUNT)
                        for (j=0; j< BLOSUM_ROWS-1; j++) {
                            #if __MIC__
                            aux2 = _mm512_i32extgather_epi32(aux1, submat + j*BLOSUM_COLS, _MM_UPCONV_EPI32_SINT8 , 1, 0);
                            _mm512_extstore_epi32(scoreProfile+disp_3+j*disp_1, aux2, _MM_DOWNCONV_EPI32_SINT8 , _MM_HINT_NONE );
                            #endif
                        }
                    }
                    for( i = 0; i < m[q]; i++){
                        // previous must start at 0
                        previous = _mm512_setzero_epi32();
                        // update row[0] with lastCol elements
                        row[0] = lastCol[i];
                        // calculate i displacement
                        ptr_scoreProfile = scoreProfile + ((int)(ptr_a[i]))*disp_1;
                        // store maxRow in an auxiliary variable
                        aux2 = maxRow[i];
                        #pragma unroll(MIC_KNC_UNROLL_COUNT)
                        for( jj=1; jj < dim+1; jj++) {
                            // calculate the diagonal value
                            #if __MIC__
                            current = _mm512_add_epi32(row[jj-1], _mm512_extload_epi32(ptr_scoreProfile+(jj-1)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0));
                            #endif
                            // calculate current max value
                            current = _mm512_max_epi32(current, aux2);
                            current = _mm512_max_epi32(current, maxCol[jj]);
                            current = _mm512_max_epi32(current, vzero);
                            // update maxRow and maxCol
                            aux2 = _mm512_sub_epi32(aux2, vextend_gap);
                            maxCol[jj] = _mm512_sub_epi32(maxCol[jj], vextend_gap);
                            aux1 = _mm512_sub_epi32(current, vopen_extend_gap);
                            aux2 = _mm512_max_epi32(aux2, aux1);
                            maxCol[jj] = _mm512_max_epi32(maxCol[jj], aux1);
                            // update row buffer
                            row[jj-1] = previous;
                            previous = current;
                            // update max score
                            score = _mm512_max_epi32(score,current);
                        }
                        // update lastCol
                        lastCol[i] = auxLastCol;
                        auxLastCol = current;
                        // update maxRow
                        maxRow[i] = aux2;
                    }
                }
                // store max value
                _mm512_store_epi32(ptr_scores, score);
            }

            // free the auxiliary buffers after the last chunk; the >= comparison matches the
            // allocation condition, so the score profile is not leaked when the maximum query
            // length equals the threshold
            if (c == chunk_count-1) {
                _mm_free(row); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol);
                if (query_sequences_max_length >= query_length_threshold) _mm_free(scoreProfile);
            }
        }

        // copy scores from auxiliary buffer to final buffer
        for (i=0; i<query_sequences_count ; i++)
            memcpy(scores+(i*vect_sequences_db_count+scores_offset)*MIC_KNC_INT32_VECTOR_LENGTH,mic_scores+i*offload_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH,offload_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH*sizeof(int));

        mic_chunks++;
    }

    // de-allocate MIC buffers
    #pragma offload_transfer target(mic:mic_no) nocopy(submat: length(0) FREE) nocopy(queryProfiles: length(0) FREE) \
        nocopy(a: length(0) FREE) nocopy(m:length(0) FREE) nocopy(a_disp: length(0) FREE) \
        nocopy(b:length(0) FREE) nocopy(n:length(0) FREE) nocopy(b_disp: length(0) FREE) \
        nocopy(mic_scores:length(0) FREE) \
        nocopy(row_ptrs, maxCol_ptrs, maxRow_ptrs, lastCol_ptrs, scoreProfile_ptrs: FREE)

    _mm_free(mic_scores);
    }

    *workTime = dwalltime()-tick;

    _mm_free(chunk_accum_vect_sequences_db_count);
    _mm_free(queryProfiles);
}

// MIC search with KNC instructions and
Adaptive Profile technique void mic_search_knc_ap_single_chunk (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned int query_sequences_count, unsigned long int Q, unsigned int * query_disp, unsigned long int vect_sequences_db_count, char ** chunk_b, unsigned int chunk_count, unsigned int * chunk_vect_sequences_db_count, unsigned short int ** chunk_n, unsigned int ** chunk_b_disp, unsigned long int * chunk_vD, char * submat, int open_gap, int extend_gap, int num_mics, int mic_threads, int * scores, double * workTime, unsigned short int query_length_threshold){ long int i=0; double tick; char *a, * b, *queryProfiles; unsigned short int * m, *n, sequences_db_max_length, query_sequences_max_length; unsigned int * a_disp, * b_disp = NULL, qp_count, sp_count; unsigned int offload_vect_sequences_db_count; unsigned long int offload_vD; a = query_sequences; b = chunk_b[0]; m = query_sequences_lengths; n = chunk_n[0]; a_disp = query_disp; b_disp = chunk_b_disp[0]; query_sequences_max_length = query_sequences_lengths[query_sequences_count-1]; sequences_db_max_length = chunk_n[chunk_count-1][chunk_vect_sequences_db_count[chunk_count-1]-1]; // build query profile's queryProfiles = (char *)_mm_malloc(Q*BLOSUM_COLS*sizeof(char), 64); for (i=0; i<Q ; i++) memcpy(queryProfiles+i*BLOSUM_COLS,submat+a[i]*BLOSUM_COLS,BLOSUM_COLS*sizeof(char)); // calculate number of query sequences that are processed with query and score profile i = 0; while ((i < query_sequences_count) && (query_sequences_lengths[i] < query_length_threshold)) i++; qp_count = i; sp_count = query_sequences_count-qp_count; tick = dwalltime(); offload_vD = chunk_vD[0]; offload_vect_sequences_db_count = chunk_vect_sequences_db_count[0]; b = chunk_b[0]; n = chunk_n[0]; b_disp = chunk_b_disp[0]; #pragma offload target(mic:0) in(submat: length(BLOSUM_ELEMS)) in(queryProfiles: length(Q*BLOSUM_COLS)) in(b: length(offload_vD)) \ in(n: length(offload_vect_sequences_db_count)) in(b_disp: length(offload_vect_sequences_db_count)) \ in(a: length(Q)) in(m: length(query_sequences_count)) in(a_disp: length(query_sequences_count)) \ out(scores: length(query_sequences_count*offload_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH)) #pragma omp parallel shared(offload_vect_sequences_db_count, query_sequences_count, open_gap, extend_gap, query_sequences_max_length, sequences_db_max_length) num_threads(mic_threads) { __m512i *row, *maxCol, *maxRow, *lastCol; int * ptr_scores; char * ptr_a, * ptr_b, *ptr_b_block, * scoreProfile, *queryProfile, *ptr_scoreProfile; __declspec(align(64)) __m512i vzero = _mm512_setzero_epi32(), score, previous, current, aux1, aux2, auxLastCol; __declspec(align(64)) __m512i vextend_gap = _mm512_set1_epi32(extend_gap), vopen_extend_gap = _mm512_set1_epi32(open_gap+extend_gap); __declspec(align(64)) __m512i v16 = _mm512_set1_epi32(16), submat_hi, submat_lo, b_values; __mmask16 mask; unsigned int i, j, jj, k, disp_1, disp_2, disp_3, dim, nbb; unsigned long int t, s, q; // allocate memory for auxiliary buffers row = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64); maxCol = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64); maxRow = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64); lastCol = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64); if (query_sequences_max_length >= query_length_threshold) scoreProfile = (char *) _mm_malloc(BLOSUM_ROWS_x_MIC_KNC_INT32_VECTOR_LENGTH*MIC_KNC_BLOCK_SIZE*sizeof(char),16); // calculate chunk 
alignments using query profile technique #pragma omp for schedule(dynamic) nowait for (t=0; t< qp_count*offload_vect_sequences_db_count; t++) { q = (qp_count-1) - (t % qp_count); s = (offload_vect_sequences_db_count-1) - (t / qp_count); queryProfile = queryProfiles + a_disp[q]*BLOSUM_COLS; ptr_b = b + b_disp[s]; ptr_scores = scores + (q*offload_vect_sequences_db_count+s)*MIC_KNC_INT32_VECTOR_LENGTH; // init buffers #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32(); // set score to 0 score = _mm512_setzero_epi32(); // calculate number of blocks nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE); for (k=0; k < nbb; k++){ // calculate dim disp_1 = k*MIC_KNC_BLOCK_SIZE; dim = (MIC_KNC_BLOCK_SIZE < n[s]-disp_1 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_1); // get b block ptr_b_block = ptr_b + disp_1*MIC_KNC_INT32_VECTOR_LENGTH; // init buffers #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=1; i<dim+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<dim ; i++ ) row[i] = _mm512_setzero_epi32(); auxLastCol = _mm512_setzero_epi32(); for( i = 0; i < m[q]; i++){ // previous must start in 0 previous = _mm512_setzero_epi32(); // update row[0] with lastCol elements row[0] = lastCol[i]; // load submat values corresponding to current a residue disp_1 = i*BLOSUM_COLS; #if __MIC__ submat_lo = _mm512_extload_epi32(queryProfile+disp_1, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0); submat_hi = _mm512_extload_epi32(queryProfile+disp_1+16, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0); #endif // store maxRow in auxiliar var aux2 = maxRow[i]; #pragma unroll(MIC_KNC_UNROLL_COUNT) for( jj=1; jj < dim+1; jj++) { //calcuate the diagonal value #if __MIC__ b_values = _mm512_extload_epi32(ptr_b_block+(jj-1)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0); #endif mask = _mm512_cmpge_epi32_mask(b_values,v16); aux1 = _mm512_permutevar_epi32(b_values, submat_lo); aux1 = _mm512_mask_permutevar_epi32(aux1, mask, b_values, submat_hi); current = _mm512_add_epi32(row[jj-1], aux1); // calculate current max value current = _mm512_max_epi32(current, aux2); current = _mm512_max_epi32(current, maxCol[jj]); current = _mm512_max_epi32(current, vzero); // update maxRow and maxCol aux2 = _mm512_sub_epi32(aux2, vextend_gap); maxCol[jj] = _mm512_sub_epi32(maxCol[jj], vextend_gap); aux1 = _mm512_sub_epi32(current, vopen_extend_gap); aux2 = _mm512_max_epi32(aux2, aux1); maxCol[jj] = _mm512_max_epi32(maxCol[jj], aux1); // update row buffer row[jj-1] = previous; previous = current; // update max score score = _mm512_max_epi32(score,current); } // update lastCol lastCol[i] = auxLastCol; auxLastCol = current; // update maxRow maxRow[i] = aux2; } } // store max value _mm512_store_epi32(ptr_scores, score); } // calculate chunk alignments using score profile technique #pragma omp for schedule(dynamic) nowait for (t=0; t< sp_count*offload_vect_sequences_db_count; t++) { q = qp_count + (sp_count-1) - (t % sp_count); s = (offload_vect_sequences_db_count-1) - (t / sp_count); ptr_a = a + a_disp[q]; ptr_b = b + b_disp[s]; ptr_scores = scores + (q*offload_vect_sequences_db_count+s)*MIC_KNC_INT32_VECTOR_LENGTH; // init buffers #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used #pragma unroll(MIC_KNC_UNROLL_COUNT) for (i=0; 
i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32();
            // set score to 0
            score = _mm512_setzero_epi32();
            // calculate number of blocks
            nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE);
            for (k=0; k < nbb; k++){
                // calculate dim
                disp_2 = k*MIC_KNC_BLOCK_SIZE;
                dim = (MIC_KNC_BLOCK_SIZE < n[s]-disp_2 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_2);
                // init buffers
                #pragma unroll(MIC_KNC_UNROLL_COUNT)
                for (i=1; i<dim+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); // index 0 is not used
                #pragma unroll(MIC_KNC_UNROLL_COUNT)
                for (i=0; i<dim ; i++ ) row[i] = _mm512_setzero_epi32();
                auxLastCol = _mm512_setzero_epi32();
                // build score profile
                disp_1 = dim*MIC_KNC_INT32_VECTOR_LENGTH;
                for (i=0; i< dim ;i++ ) {
                    #if __MIC__
                    aux1 = _mm512_extload_epi32(ptr_b+(disp_2+i)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
                    #endif
                    disp_3 = i*MIC_KNC_INT32_VECTOR_LENGTH;
                    #pragma unroll(MIC_KNC_UNROLL_COUNT)
                    for (j=0; j< BLOSUM_ROWS-1; j++) {
                        #if __MIC__
                        aux2 = _mm512_i32extgather_epi32(aux1, submat + j*BLOSUM_COLS, _MM_UPCONV_EPI32_SINT8 , 1, 0);
                        _mm512_extstore_epi32(scoreProfile+disp_3+j*disp_1, aux2, _MM_DOWNCONV_EPI32_SINT8 , _MM_HINT_NONE );
                        #endif
                    }
                }
                for( i = 0; i < m[q]; i++){
                    // previous must start at 0
                    previous = _mm512_setzero_epi32();
                    // update row[0] with lastCol elements
                    row[0] = lastCol[i];
                    // calculate i displacement
                    ptr_scoreProfile = scoreProfile + ((int)(ptr_a[i]))*disp_1;
                    // store maxRow in an auxiliary variable
                    aux2 = maxRow[i];
                    #pragma unroll(MIC_KNC_UNROLL_COUNT)
                    for( jj=1; jj < dim+1; jj++) {
                        // calculate the diagonal value
                        #if __MIC__
                        current = _mm512_add_epi32(row[jj-1], _mm512_extload_epi32(ptr_scoreProfile+(jj-1)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0));
                        #endif
                        // calculate current max value
                        current = _mm512_max_epi32(current, aux2);
                        current = _mm512_max_epi32(current, maxCol[jj]);
                        current = _mm512_max_epi32(current, vzero);
                        // update maxRow and maxCol
                        aux2 = _mm512_sub_epi32(aux2, vextend_gap);
                        maxCol[jj] = _mm512_sub_epi32(maxCol[jj], vextend_gap);
                        aux1 = _mm512_sub_epi32(current, vopen_extend_gap);
                        aux2 = _mm512_max_epi32(aux2, aux1);
                        maxCol[jj] = _mm512_max_epi32(maxCol[jj], aux1);
                        // update row buffer
                        row[jj-1] = previous;
                        previous = current;
                        // update max score
                        score = _mm512_max_epi32(score,current);
                    }
                    // update lastCol
                    lastCol[i] = auxLastCol;
                    auxLastCol = current;
                    // update maxRow
                    maxRow[i] = aux2;
                }
            }
            // store max value
            _mm512_store_epi32(ptr_scores, score);
        }

        // free the auxiliary buffers; the >= comparison matches the allocation condition,
        // so the score profile is not leaked when the maximum query length equals the threshold
        _mm_free(row); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol);
        if (query_sequences_max_length >= query_length_threshold) _mm_free(scoreProfile);
    }

    *workTime = dwalltime()-tick;

    _mm_free(queryProfiles);
}
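Both entry points above evaluate the affine-gap Smith-Waterman recurrence 16 int32 lanes at a time, carrying the running gap terms in maxRow/maxCol and clamping at zero for local alignment. The scalar sketch below is illustration only (sw_scalar and its test sequences are hypothetical, not part of MICsearch.h): it computes the same recurrence for a single sequence pair, where E[j] and F mirror the maxCol[jj] and aux2/maxRow terms, and a gap of length k costs open_gap + k*extend_gap, exactly as vopen_extend_gap / vextend_gap are applied in the kernels.

#include <stdio.h>
#include <string.h>

/* Hypothetical scalar reference for one query (a) / database sequence (b) pair. */
static int sw_scalar(const char *a, int la, const char *b, int lb,
                     int match, int mismatch, int open_gap, int extend_gap)
{
	int H[128+1], E[128+1], best = 0;
	if (lb > 128) return -1; /* sketch only: fixed-size buffers */
	memset(H, 0, sizeof(H));
	memset(E, 0, sizeof(E));
	for (int i = 1; i <= la; i++) {
		int diag = 0, F = 0;
		for (int j = 1; j <= lb; j++) {
			int s = (a[i-1] == b[j-1]) ? match : mismatch;
			int h = diag + s;                 /* diagonal value */
			if (h < E[j]) h = E[j];           /* gap in one sequence */
			if (h < F) h = F;                 /* gap in the other */
			if (h < 0) h = 0;                 /* local alignment floor */
			/* open a new gap or extend the running one */
			E[j] -= extend_gap;
			if (E[j] < h - open_gap - extend_gap) E[j] = h - open_gap - extend_gap;
			F -= extend_gap;
			if (F < h - open_gap - extend_gap) F = h - open_gap - extend_gap;
			diag = H[j];                      /* becomes H(i-1,j-1) for the next column */
			H[j] = h;
			if (h > best) best = h;
		}
	}
	return best;
}

int main(void)
{
	printf("score=%d\n", sw_scalar("GGTTGACTA", 9, "TGTTACGG", 8, 5, -4, 10, 1));
	return 0;
}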
if-clause.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

int main(int argc, char **argv) {
	int i, n=20, tid, threads;
	int a[n], suma=0, sumalocal;

	if (argc < 3) {
		fprintf(stderr, "Usage: ./ifclause iterations threads\n");
		exit(-1);
	}
	threads = atoi(argv[2]);
	n = atoi(argv[1]);
	if (n > 20) n = 20;

	for (i=0; i<n; i++) {
		a[i] = i;
	}

	// The parallel region is only activated when n > 4; otherwise it runs serialized
	#pragma omp parallel if(n>4) default(none) num_threads(threads) \
		private(sumalocal,tid) shared(a,suma,n)
	{
		sumalocal = 0;
		#pragma omp single
		{
			int t = omp_get_num_threads();
			printf("The number of threads in use is %d\n", t);
		}
		tid = omp_get_thread_num();
		#pragma omp for private(i) schedule(static) nowait
		for (i=0; i<n; i++) {
			sumalocal += a[i];
			printf(" thread %d adds a[%d]=%d sumalocal=%d \n", tid, i, a[i], sumalocal);
		}
		#pragma omp atomic
		suma += sumalocal;
		#pragma omp barrier
		#pragma omp master
		printf("master thread=%d prints suma=%d\n", tid, suma);
	}
	return 0;
}
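A minimal stand-alone sketch of the same if clause (hypothetical, not part of if-clause.c; build with cc -fopenmp): for n = 2 the test n > 4 fails and the region runs serialized on a single thread, while for n = 8 a team of up to four threads is created. omp_in_parallel() reports 0 in the serialized case.

#include <stdio.h>
#include <omp.h>

int main(void) {
	int n;
	// n = 2 fails if(n > 4), so the region is serialized;
	// n = 8 passes it, so a team of up to 4 threads is created.
	for (n = 2; n <= 8; n += 6) {
		#pragma omp parallel if(n > 4) num_threads(4)
		{
			#pragma omp single
			printf("n=%d -> threads=%d, omp_in_parallel=%d\n",
			       n, omp_get_num_threads(), omp_in_parallel());
		}
	}
	return 0;
}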
residualbased_elimination_builder_and_solver_with_constraints.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//
//

#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS)
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS

/* System includes */
#include <unordered_set>
#include <unordered_map>

/* External includes */

/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "utilities/constraint_utilities.h"
#include "input_output/logger.h"
#include "utilities/builtin_timer.h"

namespace Kratos
{
///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class ResidualBasedEliminationBuilderAndSolverWithConstraints
 * @ingroup KratosCore
 * @brief Current class provides an implementation for standard builder and solving operations.
 * @details The RHS is constituted by the unbalanced loads (residual).
 * Degrees of freedom are reordered putting the restrained degrees of freedom at
 * the end of the system ordered in reverse order with respect to the DofSet.
 * Imposition of the Dirichlet conditions is naturally dealt with as the residual already contains
 * this information.
 * Calculation of the reactions involves a cost very similar to the calculation of the total residual.
 * The system is built in the following manner: a T matrix is assembled, and a constant vector g is assembled too. The T matrix contains the relations of all the dofs of the system, even the nodes with no master/slave relation, so its size is n_total x n_red.
 * The relation is u = T u_red
 * Then:
 *     A_red = T^t A T
 *     b_red = T^t (b - A g)
 * @todo There is a more efficient, but more costly, way to assemble the system, which is the following. In this case T will be only a relation matrix between master and slave dofs, of size n_slave x n_master: us = T um + g
 * Separating into independent, master and slave dofs:
 *     u = [uu]    A = [Auu Aum Aus]    b = [bu]
 *         [um]        [Amu Amm Ams]        [bm]
 *         [us]        [Asu Asm Ass]        [bs]
 * Finally:
 *     A_red = [Auu            Aum + Aus T                        ]
 *             [Amu + T^t Asu  Amm + T^t Ams^t + Ams T + T^t Ass T]
 *     b_red = [bu - Aus g]
 *             [bm - Ams g]
 *
 * This system requires extra care, is more complicated, and requires computing the blocks properly
 * @author Vicente Mataix Ferrandiz
 */
template <class TSparseSpace, class TDenseSpace, class TLinearSolver >
class ResidualBasedEliminationBuilderAndSolverWithConstraints
    : public ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints
    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolverWithConstraints);

    /// Definition of the base class
    typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BuilderAndSolverBaseType;

    /// Definition of the base class
    typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    /// The definition of the current class
    typedef ResidualBasedEliminationBuilderAndSolverWithConstraints<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;

    // The size_t types
    typedef std::size_t SizeType;
    typedef std::size_t IndexType;

    /// Definition of the classes from the base class
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::TDataType TDataType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
    typedef typename BaseType::NodeType NodeType;
    typedef typename BaseType::NodesArrayType NodesArrayType;
    typedef typename BaseType::ElementsArrayType ElementsArrayType;
    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    /// Additional definitions
    typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
    typedef Element::EquationIdVectorType EquationIdVectorType;
    typedef Element::DofsVectorType DofsVectorType;
    typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;

    /// DoF types definition
    typedef typename NodeType::DofType DofType;
    typedef typename DofType::Pointer DofPointerType;

    /// Set definition
    typedef std::unordered_set<IndexType> IndexSetType;

    /// Map definition
    typedef std::unordered_map<IndexType, IndexType> IndexMapType;

    /// MPC definitions
    typedef MasterSlaveConstraint MasterSlaveConstraintType;
    typedef typename MasterSlaveConstraint::Pointer MasterSlaveConstraintPointerType;
    typedef std::vector<IndexType> VectorIndexType;
    typedef Vector VectorType;

    ///@}
    ///@name Enum's
    ///@{

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor
     */
    explicit ResidualBasedEliminationBuilderAndSolverWithConstraints() : BaseType()
    {
    }

    /**
     * @brief Default constructor.
(with parameters) */ explicit ResidualBasedEliminationBuilderAndSolverWithConstraints( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) : BaseType(pNewLinearSystemSolver) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * @brief Default constructor */ explicit ResidualBasedEliminationBuilderAndSolverWithConstraints( typename TLinearSolver::Pointer pNewLinearSystemSolver, const bool CheckConstraintRelation = true, const bool ResetRelationMatrixEachIteration = false ) : BaseType(pNewLinearSystemSolver), mCheckConstraintRelation(CheckConstraintRelation), mResetRelationMatrixEachIteration(ResetRelationMatrixEachIteration) { } /** Destructor. */ ~ResidualBasedEliminationBuilderAndSolverWithConstraints() override { } /** * @brief Create method * @param pNewLinearSystemSolver The linear solver for the system of equations * @param ThisParameters The configuration parameters */ typename BuilderAndSolverBaseType::Pointer Create( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters); } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void SetUpSystem(ModelPart& rModelPart) override { if(rModelPart.MasterSlaveConstraints().size() > 0) SetUpSystemWithConstraints(rModelPart); else BaseType::SetUpSystem(rModelPart); } /** * @brief Function to perform the build of the RHS. The vector could be sized as the total number * of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector */ void Build( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) BuildWithConstraints(pScheme, rModelPart, rA, rb); else BaseType::Build(pScheme, rModelPart, rA, rb); } /** * @brief Function to perform the building and solving phase at the same time. * @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { if(rModelPart.MasterSlaveConstraints().size() > 0) BuildAndSolveWithConstraints(pScheme, rModelPart, A, Dx, b); else BaseType::BuildAndSolve(pScheme, rModelPart, A, Dx, b); } /** * @brief Function to perform the build of the RHS. * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& b) override { KRATOS_TRY if(rModelPart.MasterSlaveConstraints().size() > 0) BuildRHSWithConstraints(pScheme, rModelPart, b); else BaseType::BuildRHS(pScheme, rModelPart, b); KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. 
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) SetUpDofSetWithConstraints(pScheme, rModelPart); else BaseType::SetUpDofSet(pScheme, rModelPart); } /** * @brief It applies certain operations at the system of equations at the begining of the solution step * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unkowns * @param rb The RHS vector of the system of equations */ void InitializeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); // Getting process info const ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Computing constraints const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin(); #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin) for (int k = 0; k < n_constraints; ++k) { auto it = constraints_begin + k; it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids. } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to initialize solution step.") } /** * @brief It applies certain operations at the system of equations at the end of the solution step * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unkowns * @param rb The RHS vector of the system of equations */ void FinalizeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb); // Getting process info const ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Computing constraints const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin(); #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin) for (int k = 0; k < n_constraints; ++k) { auto it = constraints_begin + k; it->FinalizeSolutionStep(r_process_info); } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to finalize solution step.") } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "elimination_builder_and_solver_with_constraints", "check_constraint_relation" : true, "reset_relation_matrix_each_iteration" : true })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return 
"elimination_builder_and_solver_with_constraints"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolverWithConstraints"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ TSystemMatrixPointerType mpTMatrix = NULL; /// This is matrix containing the global relation for the constraints TSystemMatrixPointerType mpOldAMatrix = NULL; /// This is matrix containing the old LHS structure TSystemVectorPointerType mpConstantVector = NULL; /// This is vector containing the rigid movement of the constraint TSystemVectorPointerType mpDeltaConstantVector = NULL; /// This is vector contains the effective constant displacement DofsArrayType mDoFMasterFixedSet; /// The set containing the fixed master DoF of the system DofsArrayType mDoFSlaveSet; /// The set containing the slave DoF of the system SizeType mDoFToSolveSystemSize = 0; /// Number of degrees of freedom of the problem to actually be solved IndexMapType mReactionEquationIdMap; /// In order to know the corresponding EquaionId for each component of the reaction vector bool mCheckConstraintRelation = false; /// If we do a constraint check relation bool mResetRelationMatrixEachIteration = false; /// If we reset the relation matrix at each iteration bool mComputeConstantContribution = false; /// If we compute the constant contribution of the MPC bool mCleared = true; /// If the system has been reseted ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assembles the global relation matrix (T matrix used to impose the MPC) * @param rT The global relation matrix * @param rTransformationMatrix The local transformation contribution * @param rSlaveEquationId The equation id of the slave dofs * @param rMasterEquationId The equation id of the master dofs */ void AssembleRelationMatrix( TSystemMatrixType& rT, const LocalSystemMatrixType& rTransformationMatrix, const EquationIdVectorType& rSlaveEquationId, const EquationIdVectorType& rMasterEquationId ) { const SizeType local_size_1 = rTransformationMatrix.size1(); for (IndexType i_local = 0; i_local < local_size_1; ++i_local) { IndexType i_global = rSlaveEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { BaseType::AssembleRowContributionFreeDofs(rT, rTransformationMatrix, i_global, i_local, rMasterEquationId); } } } /** * @brief This method construcs the relationship between the DoF * @param pScheme The integration scheme * @param rA The LHS of the system * @param rModelPart The model part which defines the problem */ void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme, TSystemMatrixType& rA, ModelPart& rModelPart ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) ConstructMatrixStructureWithConstraints(pScheme, rA, rModelPart); else BaseType::ConstructMatrixStructure(pScheme, rA, rModelPart); } /** * @brief The same methods as the base class but with constraints * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The 
vector of unkowns * @param rb The RHS vector of the system of equations */ void BuildAndSolveWithConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) { KRATOS_TRY Timer::Start("Build"); // We apply the master/slave relationship before build ApplyMasterSlaveRelation(pScheme, rModelPart, rA, rDx, rb); // We compute the effective constant vector TSystemVectorType dummy_Dx(mDoFToSolveSystemSize); TSparseSpace::SetToZero(dummy_Dx); ComputeEffectiveConstant(pScheme, rModelPart, dummy_Dx); // We do the build (after that we resize the solution vector to avoid problems) BuildWithConstraints(pScheme, rModelPart, rA, rb); Timer::Stop("Build"); // Now we apply the BC rDx.resize(mDoFToSolveSystemSize, false); ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl; // We solve the system of equations const auto timer = BuiltinTimer(); const double start_solve = timer.ElapsedSeconds(); Timer::Start("Solve"); SystemSolveWithPhysics(rA, rDx, rb, rModelPart); Timer::Stop("Solve"); const double stop_solve = timer.ElapsedSeconds(); // We compute the effective constant vector ComputeEffectiveConstant(pScheme, rModelPart, rDx); // We reconstruct the Unknowns vector and the residual const double start_reconstruct_slaves = timer.ElapsedSeconds(); ReconstructSlaveSolutionAfterSolve(pScheme, rModelPart, rA, rDx, rb); const double stop_reconstruct_slaves = timer.ElapsedSeconds(); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Reconstruct slaves time: " << stop_reconstruct_slaves - start_reconstruct_slaves << std::endl; // Some verbosity KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl; KRATOS_CATCH("") } /** * @brief The same methods as the base class but with constraints * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rb The RHS vector of the system of equations */ void BuildRHSWithConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { Timer::Start("Build RHS"); // Resetting to zero the vector of reactions if(BaseType::mCalculateReactionsFlag) { TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); } // Builing without BC BuildRHSNoDirichlet(pScheme,rModelPart,rb); Timer::Stop("Build RHS"); ApplyDirichletConditionsRHS(pScheme, rModelPart, rb); // We get the global T matrix const TSystemMatrixType& rTMatrix = *mpTMatrix; // Reconstruct the RHS TSystemVectorType rb_copy = rb; rb.resize(BaseType::mEquationSystemSize, false); TSparseSpace::Mult(rTMatrix, rb_copy, rb); // Adding contribution to reactions TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; if (BaseType::mCalculateReactionsFlag) { for (auto& r_dof : BaseType::mDofSet) { const bool 
is_master_fixed = mDoFMasterFixedSet.find(r_dof) != mDoFMasterFixedSet.end();
                const bool is_slave = mDoFSlaveSet.find(r_dof) != mDoFSlaveSet.end();
                if (is_master_fixed || is_slave) { // Fixed or MPC dof
                    const IndexType equation_id = r_dof.EquationId();
                    r_reactions_vector[mReactionEquationIdMap[equation_id]] += rb[equation_id];
                }
            }
        }

        // Some verbosity
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nRHS vector = " << rb << std::endl;
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" each element and condition for its Dofs.
     * @details Equivalent to the ResidualBasedEliminationBuilderAndSolver but with constraints. The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSetWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart
        )
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;

        DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations

        typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type;

        // Declaring temporary variables
        DofsArrayType dof_temp_all, dof_temp_solvable, dof_temp_slave;

        // We assign an empty dof array to our dof sets
        BaseType::mDofSet = DofsArrayType(); /// This corresponds with all the DoF of the system
        mDoFSlaveSet = DofsArrayType();      /// This corresponds with the slave (the ones not solved after compacting the system using MPC)

        /**
         * Here we declare two sets.
* - The global set: Contains all the DoF of the system * - The slave set: The DoF that are not going to be solved, due to MPC formulation */ set_type dof_global_set, dof_global_slave_set; #pragma omp parallel firstprivate(dof_list, second_dof_list) { const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // We cleate the temporal set and we reserve some space on them set_type dofs_tmp_set, dof_temp_slave_set; dofs_tmp_set.reserve(20000); dof_temp_slave_set.reserve(200); // Gets the array of elements from the modeler ElementsArrayType& r_elements_array = rModelPart.Elements(); const int number_of_elements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_elements; ++i) { auto it_elem = r_elements_array.begin() + i; // Gets list of Dof involved on every element pScheme->GetDofList(*it_elem, dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } // Gets the array of conditions from the modeler ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); const int number_of_conditions = static_cast<int>(r_conditions_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_conditions; ++i) { auto it_cond = r_conditions_array.begin() + i; // Gets list of Dof involved on every element pScheme->GetDofList(*it_cond, dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } // Gets the array of constraints from the modeler auto& r_constraints_array = rModelPart.MasterSlaveConstraints(); const int number_of_constraints = static_cast<int>(r_constraints_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_constraints; ++i) { auto it_const = r_constraints_array.begin() + i; // Gets list of Dof involved on every element it_const->GetDofList(dof_list, second_dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end()); dof_temp_slave_set.insert(dof_list.begin(), dof_list.end()); } // We merge all the sets in one thread #pragma omp critical { dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end()); dof_global_slave_set.insert(dof_temp_slave_set.begin(), dof_temp_slave_set.end()); } } KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl; /// We transfer the temporal sets to our DoF set dof_temp_all.reserve(dof_global_set.size()); for (auto p_dof : dof_global_set) { dof_temp_all.push_back( p_dof ); } dof_temp_all.Sort(); BaseType::mDofSet = dof_temp_all; dof_temp_slave.reserve(dof_global_slave_set.size()); for (auto p_dof : dof_global_slave_set) { dof_temp_slave.push_back( p_dof ); } dof_temp_slave.Sort(); mDoFSlaveSet = dof_temp_slave; // Throws an exception if there are no Degrees Of Freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", mDoFSlaveSet.size() == 0) << "No slave degrees of freedom to solve!" 
<< std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl; #ifdef USE_LOCKS_IN_ASSEMBLY KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl; if (BaseType::mLockArray.size() != 0) { for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) { omp_destroy_lock(&BaseType::mLockArray[i]); } } BaseType::mLockArray.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) { omp_init_lock(&BaseType::mLockArray[i]); } KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl; #endif // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if(BaseType::GetCalculateReactionsFlag()) { for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl << "Node : " << dof_iterator->Id()<< std::endl << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb, ModelPart& rModelPart ) { KRATOS_TRY double norm_b = 0.0; if (TSparseSpace::Size(rb) > 0) norm_b = TSparseSpace::TwoNorm(rb); if (norm_b > 0.0) { // Create the auxiliar dof set DofsArrayType aux_dof_set; aux_dof_set.reserve(mDoFToSolveSystemSize); for (auto& r_dof : BaseType::mDofSet) { if (r_dof.EquationId() < BaseType::mEquationSystemSize) { auto it = mDoFSlaveSet.find(r_dof); if (it == mDoFSlaveSet.end()) aux_dof_set.push_back( &r_dof ); } } aux_dof_set.Sort(); KRATOS_ERROR_IF_NOT(aux_dof_set.size() == mDoFToSolveSystemSize) << "Inconsistency (I) in system size: " << mDoFToSolveSystemSize << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl; KRATOS_ERROR_IF_NOT(aux_dof_set.size() == rA.size1()) << "Inconsistency (II) in system size: " << rA.size1() << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl; // Provide physical data as needed if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded()) BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, aux_dof_set, rModelPart); // Do solve BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" 
<< std::endl; } // Prints informations about the current time KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief This function is exactly same as the ConstructMatrixStructure() function in base class except that the function * @details Has the call to ApplyConstraints function call once the element and conditions compute their equation ids * @todo Move this method to a common class with block builder and solver with constraints */ virtual void ConstructMatrixStructureWithConstraints( typename TSchemeType::Pointer pScheme, TSystemMatrixType& rA, ModelPart& rModelPart ) { // Filling with zero the matrix (creating the structure) Timer::Start("MatrixStructure"); // The total number of dof of the system const SizeType equation_size = BaseType::mEquationSystemSize; // This vector contains the indexes sets for all rows std::vector<IndexSetType> indices(equation_size); // We reserve some indexes on each row #pragma omp parallel for firstprivate(equation_size) for (int index = 0; index < static_cast<int>(equation_size); ++index) indices[index].reserve(40); /// Definition of the eqautio id vector type EquationIdVectorType ids(3, 0); EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs #pragma omp parallel firstprivate(ids, second_ids) { // The process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // We repeat the same declaration for each thead std::vector<IndexSetType> temp_indexes(equation_size); #pragma omp for for (int index = 0; index < static_cast<int>(equation_size); ++index) temp_indexes[index].reserve(30); // Getting the size of the array of elements from the model const int number_of_elements = static_cast<int>(rModelPart.Elements().size()); // Element initial iterator const auto el_begin = rModelPart.ElementsBegin(); // We iterate over the elements #pragma omp for schedule(guided, 512) nowait for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) { auto it_elem = el_begin + i_elem; pScheme->EquationId(*it_elem, ids, r_current_process_info); for (auto& id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto& row_indices = temp_indexes[id_i]; for (auto& id_j : ids) { if (id_j < BaseType::mEquationSystemSize) { row_indices.insert(id_j); } } } } } // Getting the size of the array of the conditions const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size()); // Condition initial iterator const auto cond_begin = rModelPart.ConditionsBegin(); // We iterate over the conditions #pragma omp for schedule(guided, 512) nowait for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) { auto it_cond = cond_begin + i_cond; pScheme->EquationId(*it_cond, ids, r_current_process_info); for (auto& id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto& row_indices = temp_indexes[id_i]; for (auto& id_j : ids) { if (id_j < BaseType::mEquationSystemSize) { row_indices.insert(id_j); } } } } } // Getting the size of the array of the constraints const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); // Constraint initial iterator const auto const_begin = rModelPart.MasterSlaveConstraints().begin(); // We iterate over the constraints #pragma omp for schedule(guided, 512) nowait for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = const_begin + i_const; 
// Detect if the constraint is active or not. If the user did not make any choice the constraint // It is active by default bool constraint_is_active = true; if( it_const->IsDefined(ACTIVE) ) { constraint_is_active = it_const->Is(ACTIVE); } if(constraint_is_active) { it_const->EquationIdVector(ids, second_ids, r_current_process_info); // Slave DoFs for (auto& id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto& row_indices = temp_indexes[id_i]; for (auto& id_j : ids) { if (id_j < BaseType::mEquationSystemSize) { row_indices.insert(id_j); } } } } // Master DoFs for (auto& id_i : second_ids) { if (id_i < BaseType::mEquationSystemSize) { auto& row_indices = temp_indexes[id_i]; for (auto& id_j : second_ids) { if (id_j < BaseType::mEquationSystemSize) { row_indices.insert(id_j); } } } } } } // Merging all the temporal indexes #pragma omp critical { for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) { indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end()); } } } // Count the row sizes SizeType nnz = 0; for (IndexType i = 0; i < indices.size(); ++i) nnz += indices[i].size(); rA = CompressedMatrixType(indices.size(), indices.size(), nnz); double *Avalues = rA.value_data().begin(); IndexType *Arow_indices = rA.index1_data().begin(); IndexType *Acol_indices = rA.index2_data().begin(); // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(rA.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(rA.size1()); ++i) { const IndexType row_begin = Arow_indices[i]; const IndexType row_end = Arow_indices[i + 1]; IndexType k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); ++it) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } indices[i].clear(); //deallocating the memory std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } rA.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } /** * @brief This function is exactly same as the ConstructMatrixStructure() function in base class except that the function has the call to ApplyConstraints function call once the element and conditions compute their equation slave_ids * @param pScheme The pointer to the integration scheme * @param rT The global relation matrix * @param rModelPart The model part to compute */ virtual void ConstructRelationMatrixStructure( typename TSchemeType::Pointer pScheme, TSystemMatrixType& rT, ModelPart& rModelPart ) { // Filling with zero the matrix (creating the structure) Timer::Start("RelationMatrixStructure"); IndexMapType solvable_dof_reorder; std::unordered_map<IndexType, IndexSetType> master_indices; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; typedef std::pair<IndexType, IndexSetType> IndexIndexSetPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({counter}))); ++counter; } else { master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({}))); } } } // The process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); /// Definition of the eqautio id vector type EquationIdVectorType ids(3, 0); 
EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin(); // TODO: OMP for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = it_const_begin + i_const; // Detect if the constraint is active or not. If the user did not make any choice the constraint // It is active by default bool constraint_is_active = true; if( it_const->IsDefined(ACTIVE) ) { constraint_is_active = it_const->Is(ACTIVE); } if(constraint_is_active) { it_const->EquationIdVector(ids, second_ids, r_current_process_info); for (auto& slave_id : ids) { if (slave_id < BaseType::mEquationSystemSize) { auto it_slave = solvable_dof_reorder.find(slave_id); if (it_slave == solvable_dof_reorder.end()) { for (auto& master_id : second_ids) { if (master_id < BaseType::mEquationSystemSize) { auto& master_row_indices = master_indices[slave_id]; master_row_indices.insert(solvable_dof_reorder[master_id]); } } } } } } } KRATOS_DEBUG_ERROR_IF_NOT(BaseType::mEquationSystemSize == master_indices.size()) << "Inconsistency in the dofs size: " << BaseType::mEquationSystemSize << "\t vs \t" << master_indices.size() << std::endl; // Count the row sizes SizeType nnz = 0; for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i) { nnz += master_indices[i].size(); } rT = CompressedMatrixType(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, nnz); double *Tvalues = rT.value_data().begin(); IndexType *Trow_indices = rT.index1_data().begin(); IndexType *Tcol_indices = rT.index2_data().begin(); // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! 
Trow_indices[0] = 0; for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i) Trow_indices[i + 1] = Trow_indices[i] + master_indices[i].size(); KRATOS_DEBUG_ERROR_IF_NOT(Trow_indices[BaseType::mEquationSystemSize] == nnz) << "Nonzero values does not coincide with the row index definition: " << Trow_indices[BaseType::mEquationSystemSize] << " vs " << nnz << std::endl; #pragma omp parallel for for (int i = 0; i < static_cast<int>(rT.size1()); ++i) { const IndexType row_begin = Trow_indices[i]; const IndexType row_end = Trow_indices[i + 1]; IndexType k = row_begin; for (auto it = master_indices[i].begin(); it != master_indices[i].end(); ++it) { Tcol_indices[k] = *it; Tvalues[k] = 0.0; k++; } master_indices[i].clear(); //deallocating the memory std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]); } rT.set_filled(BaseType::mEquationSystemSize + 1, nnz); // Setting ones for (auto& solv_dof : solvable_dof_reorder) { rT(solv_dof.first, solv_dof.second) = 1.0; } Timer::Stop("RelationMatrixStructure"); } /** * @brief This function is exactly same as the Build() function in base class except that the function * @details It has the call to ApplyConstraints function call once the LHS or RHS are computed by elements and conditions * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector * @param UseBaseBuild If the abse Build function will be used */ void BuildWithConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb, const bool UseBaseBuild = true ) { KRATOS_TRY // We build the original system if (UseBaseBuild) BaseType::Build(pScheme, rModelPart, rA, rb); else BuildWithoutConstraints(pScheme, rModelPart, rA, rb); // Assemble the constraints const auto timer = BuiltinTimer(); // We get the global T matrix const TSystemMatrixType& rTMatrix = *mpTMatrix; // We compute only once (or if cleared) if (mCleared) { mCleared = false; ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution); } else if (mResetRelationMatrixEachIteration) { ResetConstraintSystem(); ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution); } // We compute the transposed matrix of the global relation matrix TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize); SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0); // The proper way to include the constants is in the RHS as T^t(f - A * g) TSystemVectorType rb_copy = rb; if (mComputeConstantContribution) { // We get the g constant vector TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSystemVectorType aux_constant_vector(rDeltaConstantVector); TSparseSpace::Mult(rA, rDeltaConstantVector, aux_constant_vector); TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector); } // The auxiliar matrix to store the intermediate matrix multiplication TSystemMatrixType auxiliar_A_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize); SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix); // We do a backup of the matrix before apply the constraints if (mpOldAMatrix == NULL) { // If the pointer is not initialized initialize it to an empty matrix TSystemMatrixPointerType pNewOldAMatrix = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); 
mpOldAMatrix.swap(pNewOldAMatrix); } (*mpOldAMatrix).swap(rA); // We resize the system of equations rA.resize(mDoFToSolveSystemSize, mDoFToSolveSystemSize, false); rb.resize(mDoFToSolveSystemSize, false); // Final multiplication SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, rTMatrix, rA); TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb); // Cleaning up memory auxiliar_A_matrix.resize(0, 0, false); T_transpose_matrix.resize(0, 0, false); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() >= 1) << "Constraint relation build time and multiplication: " << timer.ElapsedSeconds() << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 2) << "Finished parallel building with constraints" << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the build of the RHS. * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rb The RHS of the system */ void BuildRHSNoDirichlet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { KRATOS_TRY // Assemble the constraints const auto timer = BuiltinTimer(); // We get the global T matrix const TSystemMatrixType& rTMatrix = *mpTMatrix; // We compute only once (or if cleared) if (mCleared) { mCleared = false; ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution); } else if (mResetRelationMatrixEachIteration) { ResetConstraintSystem(); ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution); } // We compute the transposed matrix of the global relation matrix TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize); SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0); // We build the original system TSystemMatrixType A; // Dummy auxiliary matrix we need to build anyway because it is needed to impose the rigid displacements if (mComputeConstantContribution) { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ConstructMatrixStructure(pScheme, A, rModelPart); BuildWithoutConstraints(pScheme, rModelPart, A, rb); } else { BuildRHSNoDirichletWithoutConstraints(pScheme, rModelPart, rb); } // The proper way to include the constants is in the RHS as T^t(f - A * g) TSystemVectorType rb_copy = rb; if (mComputeConstantContribution) { // We get the g constant vector TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSystemVectorType aux_constant_vector(rDeltaConstantVector); TSparseSpace::Mult(A, rDeltaConstantVector, aux_constant_vector); TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector); } rb.resize(mDoFToSolveSystemSize, false); // Final multiplication TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() >= 1) << "Constraint relation build time and multiplication: " << timer.ElapsedSeconds() << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 2) << "Finished parallel building with constraints" << std::endl; KRATOS_CATCH("") } /** * @brief This method resizes and initializes the system of equations * @details Additionally to what is done in the
base class, the constraints are initialized * @param pA The pointer to the LHS matrix * @param pDx The pointer to the vector of Unknowns * @param pb The pointer to the RHS vector * @param rModelPart The model part to be computed */ void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& rModelPart ) override { // We resize the basic system BaseType::ResizeAndInitializeVectors(pScheme, pA, pDx, pb, rModelPart); // If needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag) { const SizeType reactions_vector_size = BaseType::mDofSet.size() - mDoFToSolveSystemSize + mDoFMasterFixedSet.size(); TSystemVectorType& rReactionsVector = *(BaseType::mpReactionsVector); if (rReactionsVector.size() != reactions_vector_size) rReactionsVector.resize(reactions_vector_size, false); } // Now we resize the relation matrix used in the MPC solution if(rModelPart.MasterSlaveConstraints().size() > 0) { if (mpTMatrix == NULL) { // If the pointer is not initialized initialize it to an empty matrix TSystemMatrixPointerType pNewT = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); mpTMatrix.swap(pNewT); } // The rigid movement if (mpConstantVector == NULL) { // If the pointer is not initialized initialize it to an empty vector TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0)); mpConstantVector.swap(pNewConstantVector); } // The effective rigid movement if (mpDeltaConstantVector == NULL) { // If the pointer is not initialized initialize it to an empty vector TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0)); mpDeltaConstantVector.swap(pNewConstantVector); } // System matrices/vectors TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; // Resizing the system matrix if (rTMatrix.size1() == 0 || BaseType::GetReshapeMatrixFlag() || mCleared) { // If the matrix is not initialized rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false); ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart); } else { if (rTMatrix.size1() != BaseType::mEquationSystemSize || rTMatrix.size2() != mDoFToSolveSystemSize) { KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permitted."<<std::endl; rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false); ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart); } } // Resizing the system vector // The rigid movement if (rConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) { rConstantVector.resize(BaseType::mEquationSystemSize, false); mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart); } else { if (rConstantVector.size() != BaseType::mEquationSystemSize) { KRATOS_ERROR <<"The equation system size has changed during the simulation.
This is not permitted."<<std::endl; rConstantVector.resize(BaseType::mEquationSystemSize, false); mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart); } } // The effective rigid movement if (mComputeConstantContribution) { if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) { rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false); } else { if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize) { KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permitted."<<std::endl; rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false); } } } } } /** * @brief It computes the reactions of the system * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unknowns * @param rb The RHS vector of the system of equations */ void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY // Refresh RHS to have the correct reactions BuildRHS(pScheme, rModelPart, rb); // Adding contribution to reactions TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; // Updating variables for (auto& r_dof : BaseType::mDofSet) { if ((r_dof.IsFixed()) || mDoFSlaveSet.find(r_dof) != mDoFSlaveSet.end()) { r_dof.GetSolutionStepReactionValue() = -r_reactions_vector[mReactionEquationIdMap[r_dof.EquationId()]]; } } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::CalculateReactions failed .."); } /** * @brief Applies the Dirichlet conditions. This operation may be very heavy or completely inexpensive depending on the implementation chosen and on how the System Matrix is built. * @details The base ResidualBasedEliminationBuilderAndSolver does nothing, because the BC are automatically managed with the elimination.
But in the constraints approach, the slave DoFs depending on fixed DoFs must be reconstructed * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; if (mDoFMasterFixedSet.size() > 0) { // We apply the same method as in the block builder and solver but instead of fixing the fixed Dofs, we just fix the master fixed Dofs std::vector<double> scaling_factors (mDoFToSolveSystemSize, 0.0); // NOTE: Dofs are assumed to be numbered consecutively const auto it_dof_begin = BaseType::mDofSet.begin(); IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { auto it_slave_check = mDoFSlaveSet.find(*it_dof); if (it_slave_check == mDoFSlaveSet.end()) { // Not a slave DoF: assign a unit factor unless it is also a fixed master DoF if(mDoFMasterFixedSet.find(*it_dof) == mDoFMasterFixedSet.end()) { scaling_factors[counter] = 1.0; } counter += 1; } } } double* Avalues = rA.value_data().begin(); IndexType* Arow_indices = rA.index1_data().begin(); IndexType* Acol_indices = rA.index2_data().begin(); // Detect if there is a line of all zeros and set the diagonal to a 1 if this happens #pragma omp parallel for for(int k = 0; k < static_cast<int>(mDoFToSolveSystemSize); ++k) { const IndexType col_begin = Arow_indices[k]; const IndexType col_end = Arow_indices[k+1]; bool empty = true; for (IndexType j = col_begin; j < col_end; ++j) { if(Avalues[j] != 0.0) { empty = false; break; } } if(empty) { rA(k,k) = 1.0; rb[k] = 0.0; } } #pragma omp parallel for for (int k = 0; k < static_cast<int>(mDoFToSolveSystemSize); ++k) { const IndexType col_begin = Arow_indices[k]; const IndexType col_end = Arow_indices[k+1]; const double k_factor = scaling_factors[k]; if (k_factor == 0) { // Zero out the whole row, except the diagonal for (IndexType j = col_begin; j < col_end; ++j) if (static_cast<int>(Acol_indices[j]) != k ) Avalues[j] = 0.0; // Zero out the RHS rb[k] = 0.0; } else { // Zero out the column which is associated with the zeroed row for (IndexType j = col_begin; j < col_end; ++j) { if(scaling_factors[ Acol_indices[j] ] == 0 ) { Avalues[j] = 0.0; } } } } } KRATOS_CATCH(""); } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { BaseType::Clear(); // Resetting auxiliary set of dofs mDoFMasterFixedSet = DofsArrayType(); mDoFSlaveSet = DofsArrayType(); // Clearing the relation map mReactionEquationIdMap.clear(); // Clear constraint system if (mpTMatrix != nullptr) TSparseSpace::Clear(mpTMatrix); if (mpConstantVector != nullptr) TSparseSpace::Clear(mpConstantVector); if (mpDeltaConstantVector != nullptr) TSparseSpace::Clear(mpDeltaConstantVector); // Set the flag mCleared = true; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl; } /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override {
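// First assign the base class settings, then read the constraint-specific flags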
BaseType::AssignSettings(ThisParameters); mCheckConstraintRelation = ThisParameters["check_constraint_relation"].GetBool(); mResetRelationMatrixEachIteration = ThisParameters["reset_relation_matrix_each_iteration"].GetBool(); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method computes the equivalent counterpart of the SetUpSystem when using constraints * @param rModelPart The model part of the problem to solve */ void SetUpSystemWithConstraints(ModelPart& rModelPart) { KRATOS_TRY // First we set up the system of equations without constraints // Set the equation id for the degrees of freedom: the free degrees of freedom are positioned at the beginning of the system, while the fixed ones are at the end (in opposite order). // // That means that if the EquationId is greater than "mEquationSystemSize" the pointed degree of freedom is restrained // This is almost the same SetUpSystem from ResidualBasedEliminationBuilderAndSolver, but we don't discard from the system the fixed dofs that are part of a constraint at the same time /// First we detect the master fixed DoFs /// // The current process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Vector containing the localization in the system of the different terms DofsVectorType slave_dof_list, master_dof_list; // Declaring temporary variables DofsArrayType dof_temp_fixed_master; typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type; set_type dof_global_fixed_master_set; // Iterate over constraints const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin(); #pragma omp parallel firstprivate(slave_dof_list, master_dof_list) { // We create the temporal set and reserve some space in it set_type dof_temp_fixed_master_set; dof_temp_fixed_master_set.reserve(2000); #pragma omp for schedule(guided, 512) nowait for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = it_const_begin + i_const; // Detect if the constraint is active or not.
If the user did not make any choice, the constraint // is active by default bool constraint_is_active = true; if (it_const->IsDefined(ACTIVE)) constraint_is_active = it_const->Is(ACTIVE); if (constraint_is_active) { it_const->GetDofList(slave_dof_list, master_dof_list, r_current_process_info); // Filling the set of dofs that are master and fixed at the same time for (auto& master_dof : master_dof_list) { if (master_dof->IsFixed()) { dof_temp_fixed_master_set.insert(master_dof); } } } } // We merge all the sets in one thread #pragma omp critical { dof_global_fixed_master_set.insert(dof_temp_fixed_master_set.begin(), dof_temp_fixed_master_set.end()); } } dof_temp_fixed_master.reserve(dof_global_fixed_master_set.size()); for (auto p_dof : dof_global_fixed_master_set) { dof_temp_fixed_master.push_back( p_dof ); } dof_temp_fixed_master.Sort(); mDoFMasterFixedSet = dof_temp_fixed_master; /// Now we compute as expected /// int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (auto& dof : BaseType::mDofSet) { if (dof.IsFixed()) { auto it = mDoFMasterFixedSet.find(dof); if (it == mDoFMasterFixedSet.end()) { dof.SetEquationId(--fix_id); } else { dof.SetEquationId(free_id++); } } else { dof.SetEquationId(free_id++); } } BaseType::mEquationSystemSize = fix_id; // Add the computation of the global ids of the solvable dofs IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { ++counter; } } } // The total system of equations to be solved mDoFToSolveSystemSize = counter; // Finally we build the relation between the EquationID and the component of the reaction counter = 0; for (auto& r_dof : BaseType::mDofSet) { const bool is_master_fixed = mDoFMasterFixedSet.find(r_dof) != mDoFMasterFixedSet.end(); const bool is_slave = mDoFSlaveSet.find(r_dof) != mDoFSlaveSet.end();
if (is_master_fixed || is_slave) { // Fixed or MPC dof mReactionEquationIdMap.insert({r_dof.EquationId(), counter}); ++counter; } } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::SetUpSystemWithConstraints failed .."); } /** * @brief This method initializes the DoF using the master/slave relationship * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unknowns * @param rb The RHS vector of the system of equations */ void ApplyMasterSlaveRelation( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) { KRATOS_TRY // First we reset the slave dofs ConstraintUtilities::ResetSlaveDofs(rModelPart); // Now we apply the constraints ConstraintUtilities::ApplyConstraints(rModelPart); KRATOS_CATCH(""); } /** * @brief This method checks that the master/slave relation is properly set * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rDx The vector of unknowns * @param rDxSolved The vector of unknowns actually solved */ bool CheckMasterSlaveRelation( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rDx, TSystemVectorType& rDxSolved ) { KRATOS_TRY // Auxiliary values const auto it_dof_begin = BaseType::mDofSet.begin(); TSystemVectorType current_solution(mDoFToSolveSystemSize); TSystemVectorType updated_solution(BaseType::mEquationSystemSize); TSystemVectorType residual_solution(BaseType::mEquationSystemSize); // Get current values IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { current_solution[counter] = it_dof->GetSolutionStepValue() + rDxSolved[counter]; counter += 1; } } } #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { residual_solution[equation_id] = it_dof->GetSolutionStepValue() + rDx[equation_id]; } } // Apply master slave constraints const TSystemMatrixType& rTMatrix = *mpTMatrix; TSparseSpace::Mult(rTMatrix, current_solution, updated_solution); if (mComputeConstantContribution) { ComputeConstraintContribution(pScheme, rModelPart, false, true); const TSystemVectorType& rConstantVector = *mpConstantVector; TSparseSpace::UnaliasedAdd(updated_solution, 1.0, rConstantVector); } TSparseSpace::UnaliasedAdd(residual_solution, -1.0, updated_solution); // Check database for(int k = 0; k < static_cast<int>(BaseType::mEquationSystemSize); ++k) { if (std::abs(residual_solution[k]) > std::numeric_limits<double>::epsilon()) return false; } return true; KRATOS_CATCH(""); } /** * @brief This method reconstructs the slave solution after solving. * @param pScheme The pointer to the integration scheme * @param rModelPart Reference to the ModelPart containing the problem.
* @param rA System matrix * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void ReconstructSlaveSolutionAfterSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) { KRATOS_TRY // We get the global T matrix and the constant vector const TSystemMatrixType& rTMatrix = *mpTMatrix; // We reconstruct the complete vector of Unknowns TSystemVectorType Dx_copy = rDx; rDx.resize(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, Dx_copy, rDx); // Add the constant vector if (mComputeConstantContribution) { const TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSparseSpace::UnaliasedAdd(rDx, 1.0, rDeltaConstantVector); } // We check the solution if (mCheckConstraintRelation) { KRATOS_ERROR_IF_NOT(CheckMasterSlaveRelation(pScheme, rModelPart, rDx, Dx_copy)) << "The relation between master/slave dofs is not respected" << std::endl; } // Simply restore old LHS (rA).swap(*mpOldAMatrix); mpOldAMatrix = NULL; // Reconstruct the RHS TSystemVectorType rb_copy = rb; rb.resize(BaseType::mEquationSystemSize, false); TSparseSpace::Mult(rTMatrix, rb_copy, rb); KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::ReconstructSlaveSolutionAfterSolve failed .."); } /** * @brief Function to perform the build of the system without constraints * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector */ void BuildWithoutConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb ) { // The current process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Getting the array of elements ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditons_array = rModelPart.Conditions(); // Contributions to the system LocalSystemMatrixType lhs_contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms Element::EquationIdVectorType equation_id; // Assemble all elements and conditions #pragma omp parallel firstprivate( lhs_contribution, rhs_contribution, equation_id) { // Elements const auto it_elem_begin = r_elements_array.begin(); const int nelements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i<nelements; ++i) { auto it_elem = it_elem_begin + i; // Detect if the element is active or not. If the user did not make any choice, the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental contribution pScheme->CalculateSystemContributions(*it_elem, lhs_contribution, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id); } } // Conditions const auto it_cond_begin = r_conditons_array.begin(); const int nconditions = static_cast<int>(r_conditons_array.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; ++i) { auto it_cond = it_cond_begin + i; // Detect if the condition is active or not.
If the user did not make any choice, the condition is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->CalculateSystemContributions(*it_cond, lhs_contribution, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id); } } } } /** * @brief Function to perform the build of the RHS without constraints * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rb The RHS of the system */ void BuildRHSNoDirichletWithoutConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { // The current process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Getting the array of elements ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditons_array = rModelPart.Conditions(); // Contributions to the system LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms Element::EquationIdVectorType equation_id; // Assemble all elements and conditions #pragma omp parallel firstprivate( rhs_contribution, equation_id) { // Elements const auto it_elem_begin = r_elements_array.begin(); const int nelements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i<nelements; ++i) { auto it_elem = it_elem_begin + i; // Detect if the element is active or not. If the user did not make any choice, the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental Right Hand Side Contribution pScheme->CalculateRHSContribution(*it_elem, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id); } } // Conditions const auto it_cond_begin = r_conditons_array.begin(); const int nconditions = static_cast<int>(r_conditons_array.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; ++i) { auto it_cond = it_cond_begin + i; // Detect if the condition is active or not.
If the user did not make any choice, the condition is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->CalculateRHSContribution(*it_cond, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id); } } } } /** * @brief This function does the assembling of the LHS and RHS * @note The main difference with respect to the block builder and solver is that the fixed DoFs are not considered in the assembling */ void AssembleWithoutConstraints( TSystemMatrixType& rA, TSystemVectorType& rb, const LocalSystemMatrixType& rLHSContribution, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId ) { const SizeType local_size = rLHSContribution.size1(); // Assemble RHS AssembleRHSWithoutConstraints(rb, rRHSContribution, rEquationId); // Assemble LHS for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { BaseType::AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId); } } } /** * @brief Assembling local contribution of nodes and elements in the RHS * @param rb The RHS vector */ void AssembleRHSWithoutConstraints( TSystemVectorType& rb, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId ) { const SizeType local_size = rRHSContribution.size(); if (!BaseType::mCalculateReactionsFlag) { for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { // free dof // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double rhs_value = rRHSContribution[i_local]; AtomicAdd(r_b_value, rhs_value); } } } else { TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; auto it_dof = BaseType::mDofSet.begin() + i_global; const bool is_master_fixed = mDoFMasterFixedSet.find(*it_dof) != mDoFMasterFixedSet.end(); const bool is_slave = mDoFSlaveSet.find(*it_dof) != mDoFSlaveSet.end();
if (is_master_fixed || is_slave) { // Fixed or MPC dof double& r_b_value = r_reactions_vector[mReactionEquationIdMap[i_global]]; const double rhs_value = rRHSContribution[i_local]; AtomicAdd(r_b_value, rhs_value); } else if (it_dof->IsFree()) { // Free dof not in the MPC // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; AtomicAdd(r_b_value, rhs_value); } } } } /** * @brief This method sets to zero the relation matrix */ void ResetConstraintSystem() { TSystemMatrixType& rTMatrix = *mpTMatrix; double *Tvalues = rTMatrix.value_data().begin(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(rTMatrix.nnz()); ++i) { Tvalues[i] = 0.0; } IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // Setting ones for (auto& solv_dof : solvable_dof_reorder) { rTMatrix(solv_dof.first, solv_dof.second) = 1.0; } if (mComputeConstantContribution) { TSystemVectorType& rConstantVector = *mpConstantVector; TSparseSpace::SetToZero(rConstantVector); } } /** * @brief This method applies the BC, only in the RHS * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rb The RHS vector of the system of equations */ void ApplyDirichletConditionsRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { KRATOS_TRY; if (mDoFMasterFixedSet.size() > 0) { // NOTE: dofs are assumed to be numbered consecutively const auto it_dof_begin = BaseType::mDofSet.begin(); #pragma omp parallel for for(int k = 0; k < static_cast<int>(mDoFToSolveSystemSize); ++k) { auto it_dof = it_dof_begin + k; if (k < static_cast<int>(BaseType::mEquationSystemSize)) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { if(mDoFMasterFixedSet.find(*it_dof) != mDoFMasterFixedSet.end()) { rb[k] = 0.0; } } } } } KRATOS_CATCH(""); } /** * @brief This method computes the absolute constant contribution of the MPC * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param ComputeTranslationMatrix If the translation matrix will be assembled * @param ComputeConstantVector If the constant vector will be assembled * @return If there are constant constraints */ bool ComputeConstraintContribution( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, const bool ComputeTranslationMatrix = false, const bool ComputeConstantVector = false ) { KRATOS_TRY; // We build the global T matrix and the g constant vector TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; // Filling constant vector if (ComputeConstantVector) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mEquationSystemSize); ++i) { rConstantVector[i] = 0.0; } } // Auxiliary set to reorder master DoFs IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it =
mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // The current process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Auxiliary value to detect whether there is a constant contribution double aux_constant_value = 0.0; // Contributions to the system LocalSystemMatrixType transformation_matrix = LocalSystemMatrixType(0, 0); LocalSystemVectorType constant_vector = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms EquationIdVectorType slave_equation_id, master_equation_id; const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); std::unordered_set<IndexType> auxiliar_constant_equations_ids; #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_id, master_equation_id) { std::unordered_set<IndexType> auxiliar_temp_constant_equations_ids; auxiliar_temp_constant_equations_ids.reserve(2000); #pragma omp for schedule(guided, 512) for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const; // Detect if the constraint is active or not. If the user did not make any choice, the constraint // is active by default bool constraint_is_active = true; if (it_const->IsDefined(ACTIVE)) constraint_is_active = it_const->Is(ACTIVE); if (constraint_is_active) { it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info); it_const->EquationIdVector(slave_equation_id, master_equation_id, r_current_process_info); // Reassign reordered dofs to the master side for (auto& id : master_equation_id) { id = solvable_dof_reorder[id]; } if (ComputeConstantVector) { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; if (std::abs(constant_value) > 0.0) { auxiliar_temp_constant_equations_ids.insert(i_global); double& r_value = rConstantVector[i_global]; AtomicAdd(r_value, constant_value); } } } } else { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; AtomicAdd(aux_constant_value, std::abs(constant_value)); } } } if (ComputeTranslationMatrix) { // Assemble the constraint contribution AssembleRelationMatrix(rTMatrix, transformation_matrix, slave_equation_id, master_equation_id); } } } // We merge all the sets in one thread #pragma omp critical { auxiliar_constant_equations_ids.insert(auxiliar_temp_constant_equations_ids.begin(), auxiliar_temp_constant_equations_ids.end()); } } return aux_constant_value > std::numeric_limits<double>::epsilon();
KRATOS_CATCH(""); } /** * @brief This method computes the effective constant * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rDxSolved The vector of unknowns actually solved */ void ComputeEffectiveConstant( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rDxSolved ) { if (mComputeConstantContribution) { // We get the constraint arrays const TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSparseSpace::Copy(rConstantVector, rDeltaConstantVector); // We reconstruct the complete vector of Unknowns TSystemVectorType Dx(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, rDxSolved, Dx); // Compute the effective constant vector // Auxiliary initial dof iterator const auto it_dof_begin = BaseType::mDofSet.begin(); TSystemVectorType u(BaseType::mEquationSystemSize); #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { u[equation_id] = it_dof->GetSolutionStepValue() + Dx[equation_id]; } } TSystemVectorType u_bar(mDoFToSolveSystemSize); IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { u_bar[counter] = it_dof->GetSolutionStepValue() + rDxSolved[counter]; counter += 1; } } } TSystemVectorType u_bar_complete(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, u_bar, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, 1.0, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, -1.0, u); } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedEliminationBuilderAndSolverWithConstraints */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
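// NOTE: in summary, with T the relation matrix assembled above and g the constant vector, the full set of DoFs satisfies x = T x_m + g for the solvable (master) unknowns x_m, so the system actually solved is (T^t A T) x_m = T^t (b - A g), and the full update is recovered afterwards as Dx = T Dx_m plus the effective constant vector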
GB_unaryop__identity_uint32_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint32_uint8 // op(A') function: GB_tran__identity_uint32_uint8 // C type: uint32_t // A type: uint8_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint32_uint8 ( uint32_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint32_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
tool_not_available.c
// The OpenMP standard defines 3 ways of providing ompt_start_tool: // 1. "statically-linking the tool’s definition of ompt_start_tool into an OpenMP application" // RUN: %libomp-compile -DCODE -DTOOL && %libomp-run | FileCheck %s // Note: We should compile the tool without -fopenmp as other tools developer // would do. Otherwise this test may pass for the wrong reasons on Darwin. // RUN: %clang %flags -DTOOL -shared -fPIC %s -o %T/tool.so // 2. "introducing a dynamically-linked library that includes the tool’s definition of ompt_start_tool into the application’s address space" // 2.1 Link with tool during compilation // RUN: %libomp-compile -DCODE %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.2 Link with tool during compilation, but AFTER the runtime // RUN: %libomp-compile -DCODE -lomp %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.3 Inject tool via the dynamic loader // RUN: %libomp-compile -DCODE && %preload-tool %libomp-run | FileCheck %s // 3. "providing the name of a dynamically-linked library appropriate for the architecture and operating system used by the application in the tool-libraries-var ICV" // RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/tool.so %libomp-run | FileCheck %s // REQUIRES: ompt /* * This file contains code for an OMPT shared library tool to be * loaded and the code for the OpenMP executable. * -DTOOL enables the code for the tool during compilation * -DCODE enables the code for the executable during compilation */ #ifdef CODE #include "stdio.h" #include "omp.h" #include "omp-tools.h" int main() { #pragma omp parallel num_threads(2) { #pragma omp master { int result = omp_control_tool(omp_control_tool_start, 0, NULL); printf("0: control_tool()=%d\n", result); } } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: Do not initialize tool // CHECK: {{^}}0: control_tool()=-2 return 0; } #endif /* CODE */ #ifdef TOOL #include <omp-tools.h> #include "stdio.h" ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { printf("0: Do not initialize tool\n"); return NULL; } #endif /* TOOL */
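/* NOTE: returning NULL from ompt_start_tool tells the runtime that this tool will not be used; with no active tool, omp_control_tool() returns omp_control_tool_notool (-2), which is exactly what the CHECK lines above expect. */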
omp-parallel-for.c
extern void abort (void); int main (void) { int i, a; a = 30; #pragma omp parallel for firstprivate (a) lastprivate (a) \ num_threads (2) schedule(static) for (i = 0; i < 10; i++) a = a + i; /* The thread that owns the last iteration will have computed 30 + 5 + 6 + 7 + 8 + 9 = 65. */ if (a != 65) abort (); return 0; }
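/* NOTE: with schedule(static) and num_threads(2), iterations 0-4 go to thread 0 and 5-9 to thread 1; firstprivate(a) initializes each private copy to 30 and lastprivate(a) copies back the value from the sequentially last iteration (i == 9), hence 30 + 5 + 6 + 7 + 8 + 9 == 65. */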
wave3d_perf_b.c
#ifndef TAPENADE #include <math.h> #endif #define Max(x,y) fmax(x,y) #define Min(x,y) fmin(x,y) #define Heaviside(x) ((x>=0)?1.0:0.0) #define u(x,xx,xxx) u[x][xx][xxx] #define u_b(x,xx,xxx) ub[x][xx][xxx] #define c(x,xx,xxx) c[x][xx][xxx] #define u_1(x,xx,xxx) u_1[x][xx][xxx] #define u_1_b(x,xx,xxx) u_1b[x][xx][xxx] #define u_2(x,xx,xxx) u_2[x][xx][xxx] #define u_2_b(x,xx,xxx) u_2b[x][xx][xxx] void wave3d_perf_b(double *u_vec, double *ub_vec, double *u_1_vec, double *u_1b_vec, double *u_2_vec, double *u_2b_vec, double *c_vec, double D, int n) { double (*u)[n][n] = (double (*)[n][n]) u_vec; double (*c)[n][n] = (double (*)[n][n]) c_vec; double (*u_1)[n][n] = (double (*)[n][n]) u_1_vec; double (*u_2)[n][n] = (double (*)[n][n]) u_2_vec; double (*ub)[n][n] = (double (*)[n][n]) ub_vec; double (*u_1b)[n][n] = (double (*)[n][n]) u_1b_vec; double (*u_2b)[n][n] = (double (*)[n][n]) u_2b_vec; int i; int j; int k; i=0; #pragma omp parallel for private(k,j) for ( j=1; j<=n - 2; j++ ) { for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); } } i=n - 2; j=0; #pragma omp parallel for private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); } i=n - 2; j=n - 2; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=n - 2; j=n - 2; k=n - 2; u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=n - 2; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); i=n - 2; j=n - 2; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=n - 2; #pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; j=1; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=n - 2; j=1; k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=1; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); i=n - 2; j=1; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=n - 2; j=1; #pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; j=n - 1; #pragma omp parallel for private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=n - 2; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 
1); } i=n - 2; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=n - 2; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=n - 2; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } i=1; j=0; #pragma omp parallel for private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); } i=1; j=n - 2; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=1; j=n - 2; k=n - 2; u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=n - 2; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); i=1; j=n - 2; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=n - 2; #pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; j=1; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); i=1; j=1; k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=1; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); i=1; j=1; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); i=1; j=1; #pragma omp parallel for private(k) for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; j=n - 1; #pragma omp parallel for 
private(k) for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=1; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } i=1; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } i=1; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } i=1; #pragma omp parallel for private(k,j) for ( j=2; j<=n - 3; j++ ) { for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } i=n - 1; #pragma omp parallel for private(k,j) for ( j=1; j<=n - 2; j++ ) { for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=0; for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=n - 2; u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 2; for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=n - 2; 
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=1; for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { j=n - 1; for ( k=1; k<=n - 2; k++ ) { u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=0; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=n - 2; u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=1; u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { k=n - 1; u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } #pragma omp parallel for private(k,j,i) for ( i=2; i<=n - 3; i++ ) { for ( j=2; j<=n - 3; j++ ) { for ( k=2; k<=n - 3; k++ ) { u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1); u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k); u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k); u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k); u_2_b(i,j,k) += -u_b(i, j, k); u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k); u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k); u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1); } } } }
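/* NOTE (assumed interpretation): this file appears to be machine-generated reverse-mode (adjoint) code, in the style produced by Tapenade, for a 3-D wave-equation stencil: each forward update of u from u_1 and u_2 is transposed into the scatters of ub into u_1b and u_2b seen above, with the domain split into interior and boundary cases so that every loop nest carries no dependences and can be parallelized with OpenMP. */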
GB_AxB_dot2.c
//------------------------------------------------------------------------------ // GB_AxB_dot2: compute C=A'*B or C<!M>=A'*B in parallel, in place //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // GB_AxB_dot2 does its computation in two phases. The first phase counts the // number of entries in each column of C. The second phase can then construct // the result C in place, and thus this method can be done in parallel for the // single matrix computation C=A'*B. // Two variants are handled: C=A'*B and C<!M>=A'*B. // The C<M>=A'*B computation is computed by GB_AxB_dot3. #include "GB_mxm.h" #include "GB_iterator.h" #ifndef GBCOMPACT #include "GB_AxB__include.h" #endif #define GB_FREE_WORK \ { \ GB_FREE_MEMORY (B_slice, nbslice+1, sizeof (int64_t)) ; \ if (C_counts != NULL) \ { \ for (int taskid = 0 ; taskid < naslice ; taskid++) \ { \ GB_FREE_MEMORY (C_counts [taskid], cnvec, sizeof (int64_t)) ; \ } \ } \ GB_FREE_MEMORY (C_counts, naslice, sizeof (int64_t *)) ; \ } GrB_Info GB_AxB_dot2 // C=A'*B or C<!M>=A'*B, dot product method ( GrB_Matrix *Chandle, // output matrix const GrB_Matrix M, // mask matrix for C<!M>=A'*B // if present, the mask is complemented const GrB_Matrix *Aslice, // input matrices (already sliced) const GrB_Matrix B, // input matrix const GrB_Semiring semiring, // semiring that defines C=A*B const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b) bool *mask_applied, // if true, mask was applied int nthreads, int naslice, int nbslice, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; ASSERT (Aslice != NULL) ; GrB_Matrix A = Aslice [0] ; // just for type and dimensions ASSERT (Chandle != NULL) ; ASSERT (*Chandle == NULL) ; ASSERT_OK_OR_NULL (GB_check (M, "M for dot A'*B", GB0)) ; ASSERT_OK (GB_check (A, "A for dot A'*B", GB0)) ; for (int taskid = 0 ; taskid < naslice ; taskid++) { ASSERT_OK (GB_check (Aslice [taskid], "A slice for dot2 A'*B", GB0)) ; ASSERT (!GB_PENDING (Aslice [taskid])) ; ASSERT (!GB_ZOMBIES (Aslice [taskid])) ; ASSERT ((Aslice [taskid])->vlen == B->vlen) ; ASSERT (A->vlen == (Aslice [taskid])->vlen) ; ASSERT (A->vdim == (Aslice [taskid])->vdim) ; ASSERT (A->type == (Aslice [taskid])->type) ; } ASSERT_OK (GB_check (B, "B for dot A'*B", GB0)) ; ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ; ASSERT_OK (GB_check (semiring, "semiring for numeric A'*B", GB0)) ; ASSERT (A->vlen == B->vlen) ; ASSERT (mask_applied != NULL) ; int64_t *restrict B_slice = NULL ; int64_t **C_counts = NULL ; int64_t cnvec = B->nvec ; //-------------------------------------------------------------------------- // get the semiring operators //-------------------------------------------------------------------------- GrB_BinaryOp mult = semiring->multiply ; GrB_Monoid add = semiring->add ; ASSERT (mult->ztype == add->op->ztype) ; bool op_is_first = mult->opcode == GB_FIRST_opcode ; bool op_is_second = mult->opcode == GB_SECOND_opcode ; bool A_is_pattern = false ; bool B_is_pattern = false ; if (flipxy) { // z = fmult (b,a) will be computed A_is_pattern = 
op_is_first ; B_is_pattern = op_is_second ; ASSERT (GB_IMPLIES (!A_is_pattern, GB_Type_compatible (A->type, mult->ytype))) ; ASSERT (GB_IMPLIES (!B_is_pattern, GB_Type_compatible (B->type, mult->xtype))) ; } else { // z = fmult (a,b) will be computed A_is_pattern = op_is_second ; B_is_pattern = op_is_first ; ASSERT (GB_IMPLIES (!A_is_pattern, GB_Type_compatible (A->type, mult->xtype))) ; ASSERT (GB_IMPLIES (!B_is_pattern, GB_Type_compatible (B->type, mult->ytype))) ; } (*Chandle) = NULL ; //-------------------------------------------------------------------------- // allocate workspace and slice B //-------------------------------------------------------------------------- if (!GB_pslice (&B_slice, /* B */ B->p, B->nvec, nbslice)) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // compute # of entries in each vector of C //-------------------------------------------------------------------------- GrB_Type ctype = add->op->ztype ; int64_t cvlen = A->vdim ; int64_t cvdim = B->vdim ; if (B->nvec_nonempty < 0) { B->nvec_nonempty = GB_nvec_nonempty (B, NULL) ; } GB_CALLOC_MEMORY (C_counts, naslice, sizeof (int64_t *)) ; if (C_counts == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++) { int64_t *restrict C_count = NULL ; GB_CALLOC_MEMORY (C_count, B->nvec, sizeof (int64_t)) ; if (C_count == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } C_counts [a_taskid] = C_count ; } for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++) { if ((Aslice [a_taskid])->nvec_nonempty < 0) { (Aslice [a_taskid])->nvec_nonempty = GB_nvec_nonempty (Aslice [a_taskid], NULL) ; } } #define GB_PHASE_1_OF_2 #include "GB_AxB_dot2_meta.c" #undef GB_PHASE_1_OF_2 GB_NEW (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true, GB_SAME_HYPER_AS (B->is_hyper), B->hyper_ratio, cnvec, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_WORK ; return (info) ; } GrB_Matrix C = (*Chandle) ; int64_t *restrict Cp = C->p ; // cumulative sum of counts in each column #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < cnvec ; k++) { int64_t s = 0 ; for (int taskid = 0 ; taskid < naslice ; taskid++) { int64_t *restrict C_count = C_counts [taskid] ; int64_t c = C_count [k] ; C_count [k] = s ; s += c ; } Cp [k] = s ; } Cp [cnvec] = 0 ; C->nvec = cnvec ; // Cp = cumulative sum of Cp GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ; int64_t cnz = Cp [cnvec] ; // C->h = B->h if (B->is_hyper) { GB_memcpy (C->h, B->h, cnvec * sizeof (int64_t), nthreads) ; } // free C_count for the first thread; it is no longer needed GB_FREE_MEMORY (C_counts [0], cnvec, sizeof (int64_t)) ; C->magic = GB_MAGIC ; //-------------------------------------------------------------------------- // allocate C->x and C->i //-------------------------------------------------------------------------- info = GB_ix_alloc (C, cnz, true, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_MATRIX_FREE (Chandle) ; GB_FREE_WORK ; return (info) ; } //-------------------------------------------------------------------------- // C = A'*B, computing each entry with a dot product, via builtin semiring //-------------------------------------------------------------------------- bool done = false ; #ifndef GBCOMPACT //-------------------------------------------------------------------------- // define the worker for the switch factory 
//-------------------------------------------------------------------------- #define GB_Adot2B(add,mult,xyname) GB_Adot2B_ ## add ## mult ## xyname #define GB_AxB_WORKER(add,mult,xyname) \ { \ info = GB_Adot2B (add,mult,xyname) (C, M, \ Aslice, A_is_pattern, B, B_is_pattern, B_slice, \ C_counts, nthreads, naslice, nbslice) ; \ done = (info != GrB_NO_VALUE) ; \ } \ break ; //-------------------------------------------------------------------------- // launch the switch factory //-------------------------------------------------------------------------- GB_Opcode mult_opcode, add_opcode ; GB_Type_code xycode, zcode ; if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern, semiring, flipxy, &mult_opcode, &add_opcode, &xycode, &zcode)) { #include "GB_AxB_factory.c" } ASSERT (info == GrB_SUCCESS || info == GrB_NO_VALUE) ; #endif //-------------------------------------------------------------------------- // user semirings created at compile time //-------------------------------------------------------------------------- if (semiring->object_kind == GB_USER_COMPILED) { // determine the required type of A and B for the user semiring GrB_Type atype_required, btype_required ; if (flipxy) { // A is passed as y, and B as x, in z = mult(x,y) atype_required = mult->ytype ; btype_required = mult->xtype ; } else { // A is passed as x, and B as y, in z = mult(x,y) atype_required = mult->xtype ; btype_required = mult->ytype ; } if (A->type == atype_required && B->type == btype_required) { info = GB_AxB_user (GxB_AxB_DOT, semiring, Chandle, M, NULL, B, flipxy, /* heap: */ NULL, NULL, NULL, 0, /* Gustavson: */ NULL, /* dot2: */ Aslice, B_slice, nthreads, naslice, nbslice, C_counts, /* dot3: */ NULL, 0) ; done = true ; } } //-------------------------------------------------------------------------- // C = A'*B, computing each entry with a dot product, with typecasting //-------------------------------------------------------------------------- if (!done) { //---------------------------------------------------------------------- // get operators, functions, workspace, contents of A, B, C, and M //---------------------------------------------------------------------- GxB_binary_function fmult = mult->function ; GxB_binary_function fadd = add->op->function ; size_t csize = C->type->size ; size_t asize = A_is_pattern ? 0 : A->type->size ; size_t bsize = B_is_pattern ? 0 : B->type->size ; size_t xsize = mult->xtype->size ; size_t ysize = mult->ytype->size ; // scalar workspace: because of typecasting, the x/y types need not // be the same as the size of the A and B types. // flipxy false: aki = (xtype) A(k,i) and bkj = (ytype) B(k,j) // flipxy true: aki = (ytype) A(k,i) and bkj = (xtype) B(k,j) size_t aki_size = flipxy ? ysize : xsize ; size_t bkj_size = flipxy ? xsize : ysize ; // GB_void *restrict identity = add->identity ; GB_void *restrict terminal = add->terminal ; GB_cast_function cast_A, cast_B ; if (flipxy) { // A is typecasted to y, and B is typecasted to x cast_A = A_is_pattern ? NULL : GB_cast_factory (mult->ytype->code, A->type->code) ; cast_B = B_is_pattern ? NULL : GB_cast_factory (mult->xtype->code, B->type->code) ; } else { // A is typecasted to x, and B is typecasted to y cast_A = A_is_pattern ? NULL : GB_cast_factory (mult->xtype->code, A->type->code) ; cast_B = B_is_pattern ? 
NULL : GB_cast_factory (mult->ytype->code, B->type->code) ; } //---------------------------------------------------------------------- // C = A'*B via dot products, function pointers, and typecasting //---------------------------------------------------------------------- // aki = A(k,i), located in Ax [pA] #define GB_GETA(aki,Ax,pA) \ GB_void aki [GB_PGI(aki_size)] ; \ if (!A_is_pattern) cast_A (aki, Ax +((pA)*asize), asize) ; // bkj = B(k,j), located in Bx [pB] #define GB_GETB(bkj,Bx,pB) \ GB_void bkj [GB_PGI(bkj_size)] ; \ if (!B_is_pattern) cast_B (bkj, Bx +((pB)*bsize), bsize) ; // break if cij reaches the terminal value #define GB_DOT_TERMINAL(cij) \ if (terminal != NULL && memcmp (cij, terminal, csize) == 0) \ { \ break ; \ } // C(i,j) = A(i,k) * B(k,j) #define GB_MULT(cij, aki, bkj) \ GB_MULTIPLY (cij, aki, bkj) ; \ // C(i,j) += A(i,k) * B(k,j) #define GB_MULTADD(cij, aki, bkj) \ GB_void zwork [GB_PGI(csize)] ; \ GB_MULTIPLY (zwork, aki, bkj) ; \ fadd (cij, cij, zwork) ; // define cij for each task #define GB_CIJ_DECLARE(cij) \ GB_void cij [GB_PGI(csize)] ; // address of Cx [p] #define GB_CX(p) Cx +((p)*csize) // save the value of C(i,j) #define GB_CIJ_SAVE(cij,p) \ memcpy (GB_CX (p), cij, csize) ; #define GB_ATYPE GB_void #define GB_BTYPE GB_void #define GB_CTYPE GB_void #define GB_PHASE_2_OF_2 // loops with function pointers cannot be vectorized #define GB_DOT_SIMD ; if (flipxy) { #define GB_MULTIPLY(z,x,y) fmult (z,y,x) #include "GB_AxB_dot2_meta.c" #undef GB_MULTIPLY } else { #define GB_MULTIPLY(z,x,y) fmult (z,x,y) #include "GB_AxB_dot2_meta.c" #undef GB_MULTIPLY } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK ; ASSERT_OK (GB_check (C, "dot: C = A'*B output", GB0)) ; ASSERT (*Chandle == C) ; (*mask_applied) = (M != NULL) ; return (GrB_SUCCESS) ; }
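/* Editorial sketch (not part of SuiteSparse:GraphBLAS): the core of the
   two-phase method above is "count, cumulative-sum, fill".  Phase 1 fills
   counts[t][k] with the entries task t found in vector k; the helper below
   (dot2_offsets is an illustrative name) turns those counts into write
   offsets, mirroring the per-vector loop and GB_cumsum call in the file. */
#include <stdint.h>

static void dot2_offsets (int64_t **counts, int64_t *Cp,
                          int ntasks, int64_t nvec)
{
    #pragma omp parallel for schedule(static)
    for (int64_t k = 0 ; k < nvec ; k++)
    {
        int64_t s = 0 ;
        for (int t = 0 ; t < ntasks ; t++)
        {
            int64_t c = counts [t][k] ;
            counts [t][k] = s ;         /* task t's start within vector k */
            s += c ;
        }
        Cp [k] = s ;                    /* total entries in vector k */
    }
    /* serial exclusive prefix sum over the vectors, total lands in Cp[nvec] */
    int64_t s = 0 ;
    for (int64_t k = 0 ; k < nvec ; k++)
    {
        int64_t c = Cp [k] ;
        Cp [k] = s ;
        s += c ;
    }
    Cp [nvec] = s ;                     /* total number of entries in C */
}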
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-252,256)),ceild(32*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(8*t1+Nx+13,256)),floord(16*t2+Nx+12,256)),floord(32*t3+Nx+28,256)),floord(16*t1-16*t2+Nz+Nx+11,256));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),256*t4+254),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
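/* Editorial sketch: the untiled reference form of the update computed by the
   CLooG-generated, time-tiled nest above; handy for validating the tiled
   code against a known-good loop (stencil_ref is an illustrative name). */
static void stencil_ref (int Nt, int Nz, int Ny, int Nx,
                         double ****A, double alpha, double beta)
{
    for (int t = 0; t < Nt - 1; t++)
        for (int i = 1; i < Nz - 1; i++)
            for (int j = 1; j < Ny - 1; j++)
                for (int k = 1; k < Nx - 1; k++)
                    A[(t + 1) % 2][i][j][k] =
                        alpha * A[t % 2][i][j][k]
                      + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]
                              + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]
                              + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
}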
GB_unop__identity_fp32_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp32_uint16) // op(A') function: GB (_unop_tran__identity_fp32_uint16) // C type: float // A type: uint16_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = (float) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (float) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp32_uint16) ( float *Cx, // Cx and Ax may be aliased const uint16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint16_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp32_uint16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
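/* Editorial sketch of the generic fallback this hard-coded kernel replaces:
   one typecast through a function pointer per entry.  The generated kernel
   above exists so the cast inlines and the loop vectorizes; the names below
   are illustrative, not GraphBLAS API. */
#include <stdint.h>
#include <stddef.h>

typedef void (*cast_fn) (void *z, const void *x) ;

static void unop_apply_generic (void *Cx, const void *Ax, const int8_t *Ab,
                                int64_t anz, size_t csize, size_t asize,
                                cast_fn cast)
{
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* skip unset bitmap entries */
        cast ((char *) Cx + p * csize, (const char *) Ax + p * asize) ;
    }
}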
AuxiliaryUtilities.h
#pragma once

// Author: Miguel Angel Celigueta maceli@cimne.upc.edu

#include <pybind11/pybind11.h>
#include "includes/define.h"
#include "includes/model_part.h"
#include "pre_utilities.h"

namespace Kratos {

    class AuxiliaryUtilities {

    public:

        typedef ModelPart::ElementsContainerType ElementsArrayType;
        typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
        typedef GlobalPointersVector<Element> ParticleWeakVectorType;
        typedef GlobalPointersVector<Element>::iterator ParticleWeakIteratorType;

        KRATOS_CLASS_POINTER_DEFINITION(AuxiliaryUtilities);

        AuxiliaryUtilities() {};
        virtual ~AuxiliaryUtilities() {};

        void UpdateTimeInOneModelPart(ModelPart& r_model_part, const double& time, const double& dt, const bool& is_time_to_print);

        double ComputeAverageZStressFor2D(ModelPart& rSpheresModelPart) {
            ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements();
            double sub_total = 0.0;
            double average_value = 0.0;
            // each thread accumulates a private partial sum, combined at the join
            #pragma omp parallel for reduction(+: sub_total)
            for (int k = 0; k < (int)pElements.size(); k++) {
                ElementsArrayType::iterator it = pElements.ptr_begin() + k;
                Element* p_element = &(*it);
                SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(p_element);
                double z_tensor_value = (*p_sphere->mSymmStressTensor)(2,2);
                sub_total += z_tensor_value;
            }
            average_value = sub_total / (int)pElements.size();
            return average_value;
        }
    };
}
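/* The reduction clause in the loop above gives each thread a private copy of
   sub_total; without it, the concurrent "+=" is a data race.  A minimal
   self-contained illustration of the same pattern in C: */
#include <stdio.h>

int main (void)
{
    double vals [8] = { 1, 2, 3, 4, 5, 6, 7, 8 } ;
    double sum = 0.0 ;
    #pragma omp parallel for reduction(+: sum)
    for (int k = 0 ; k < 8 ; k++)
        sum += vals [k] ;            /* race-free per-thread partial sums */
    printf ("average = %g\n", sum / 8.0) ;
    return 0 ;
}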
GB_split_sparse.c
//------------------------------------------------------------------------------ // GB_split_sparse: split a sparse/hypersparse matrix into tiles //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #define GB_FREE_WORK \ GB_WERK_POP (C_ek_slicing, int64_t) ; \ GB_FREE_WERK (&Wp, Wp_size) ; #define GB_FREE_ALL \ GB_FREE_WORK ; \ GB_Matrix_free (&C) ; #include "GB_split.h" GrB_Info GB_split_sparse // split a sparse matrix ( GrB_Matrix *Tiles, // 2D row-major array of size m-by-n const GrB_Index m, const GrB_Index n, const int64_t *restrict Tile_rows, // size m+1 const int64_t *restrict Tile_cols, // size n+1 const GrB_Matrix A, // input matrix GB_Context Context ) { //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GrB_Info info ; int A_sparsity = GB_sparsity (A) ; bool A_is_hyper = (A_sparsity == GxB_HYPERSPARSE) ; ASSERT (A_is_hyper || A_sparsity == GxB_SPARSE) ; GrB_Matrix C = NULL ; GB_WERK_DECLARE (C_ek_slicing, int64_t) ; ASSERT_MATRIX_OK (A, "A sparse for split", GB0) ; int sparsity_control = A->sparsity ; float hyper_switch = A->hyper_switch ; bool csc = A->is_csc ; GrB_Type atype = A->type ; int64_t avlen = A->vlen ; int64_t avdim = A->vdim ; size_t asize = atype->size ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int64_t nouter = csc ? n : m ; int64_t ninner = csc ? m : n ; const int64_t *Tile_vdim = csc ? Tile_cols : Tile_rows ; const int64_t *Tile_vlen = csc ? Tile_rows : Tile_cols ; int64_t anvec = A->nvec ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- size_t Wp_size = 0 ; int64_t *restrict Wp = NULL ; Wp = GB_MALLOC_WERK (anvec, int64_t, &Wp_size) ; if (Wp == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } GB_memcpy (Wp, Ap, anvec * sizeof (int64_t), nthreads_max) ; //-------------------------------------------------------------------------- // split A into tiles //-------------------------------------------------------------------------- int64_t akend = 0 ; for (int64_t outer = 0 ; outer < nouter ; outer++) { //---------------------------------------------------------------------- // find the starting and ending vector of these tiles //---------------------------------------------------------------------- // The tile appears in vectors avstart:avend-1 of A, and indices // aistart:aiend-1. const int64_t avstart = Tile_vdim [outer] ; const int64_t avend = Tile_vdim [outer+1] ; int64_t akstart = akend ; if (A_is_hyper) { // A is hypersparse: look for vector avend in the A->h hyper list. // The vectors to handle for this outer loop are in // Ah [akstart:akend-1]. 
akend = akstart ; int64_t pright = anvec - 1 ; bool found ; GB_SPLIT_BINARY_SEARCH (avend, Ah, akend, pright, found) ; // printf ("anvec %ld akstart: %ld akend: %ld avend %ld\n", // anvec, akstart, akend, avend) ; ASSERT (GB_IMPLIES (akstart <= akend-1, Ah [akend-1] < avend)) ; // for (int64_t k = akstart ; k < akend ; k++) // { // printf (" Ah [%ld] = %ld\n", k, Ah [k]) ; // } } else { // A is sparse; the vectors to handle are akstart:akend-1 akend = avend ; } // # of vectors in all tiles in this outer loop int64_t cnvec = akend - akstart ; int nth = GB_nthreads (cnvec, chunk, nthreads_max) ; // printf ("akend %ld\n", akend) ; //---------------------------------------------------------------------- // create all tiles for vectors akstart:akend-1 in A //---------------------------------------------------------------------- for (int64_t inner = 0 ; inner < ninner ; inner++) { // printf ("\ninner: %ld, cnvec %ld akstart %ld akend %ld\n", // inner, cnvec, akstart, akend) ; //------------------------------------------------------------------ // allocate C, C->p, and C->h for this tile //------------------------------------------------------------------ const int64_t aistart = Tile_vlen [inner] ; const int64_t aiend = Tile_vlen [inner+1] ; const int64_t cvdim = avend - avstart ; const int64_t cvlen = aiend - aistart ; C = NULL ; GB_OK (GB_new (&C, false, // new header atype, cvlen, cvdim, GB_Ap_malloc, csc, A_sparsity, hyper_switch, cnvec, Context)) ; C->sparsity = sparsity_control ; C->hyper_switch = hyper_switch ; C->nvec = cnvec ; int64_t *restrict Cp = C->p ; int64_t *restrict Ch = C->h ; //------------------------------------------------------------------ // determine the boundaries of this tile //------------------------------------------------------------------ int64_t k ; #pragma omp parallel for num_threads(nth) schedule(static) for (k = akstart ; k < akend ; k++) { // printf ("look in kth vector of A: k = %ld\n", k) ; int64_t pA = Wp [k] ; int64_t pA_end = Ap [k+1] ; int64_t aknz = pA_end - pA ; // printf ("Wp [%ld] = %ld\n", k, Wp [k]) ; // printf ("Ap [%ld+1] = %ld\n", k, Ap [k+1]) ; // printf ("aknz %ld\n", aknz) ; if (aknz == 0 || Ai [pA] >= aiend) { // this vector of C is empty // printf ("empty\n") ; } else if (aknz > 256) { // use binary search to find aiend bool found ; int64_t pright = pA_end - 1 ; GB_SPLIT_BINARY_SEARCH (aiend, Ai, pA, pright, found) ; #ifdef GB_DEBUG // check the results with a linear search int64_t p2 = Wp [k] ; for ( ; p2 < Ap [k+1] ; p2++) { if (Ai [p2] >= aiend) break ; } ASSERT (pA == p2) ; #endif } else { // use a linear-time search to find aiend for ( ; pA < pA_end ; pA++) { if (Ai [pA] >= aiend) break ; } #ifdef GB_DEBUG // check the results with a binary search bool found ; int64_t p2 = Wp [k] ; int64_t p2_end = Ap [k+1] - 1 ; GB_SPLIT_BINARY_SEARCH (aiend, Ai, p2, p2_end, found) ; ASSERT (pA == p2) ; #endif } Cp [k-akstart] = (pA - Wp [k]) ; // # of entries in this vector if (A_is_hyper) { Ch [k-akstart] = Ah [k] - avstart ; } // printf ("cknz is %ld\n", Cp [k-akstart]) ; } GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nth, Context) ; int64_t cnz = Cp [cnvec] ; // for (int64_t k = 0 ; k <= cnvec ; k++) // { // printf ("Cp [%ld] = %ld\n", k, Cp [k]) ; // } //------------------------------------------------------------------ // allocate C->i and C->x for this tile //------------------------------------------------------------------ GB_OK (GB_bix_alloc (C, cnz, false, false, true, true, Context)) ; int64_t *restrict Ci = C->i ; // memset (Ci, 255, cnz * 
sizeof (int64_t)) ; //------------------------------------------------------------------ // copy the tile from A into C //------------------------------------------------------------------ int C_ntasks, C_nthreads ; GB_SLICE_MATRIX (C, 8, chunk) ; // printf ("C_ntasks %d C_nthreads %d\n", C_ntasks, C_nthreads) ; // printf ("C->nzmax %ld C->nvec %ld\n", C->nzmax, C->nvec) ; bool done = false ; #ifndef GBCOMPACT { // no typecasting needed switch (asize) { #define GB_COPY(pC,pA) Cx [pC] = Ax [pA] case 1 : // uint8, int8, bool, or 1-byte user-defined #define GB_CTYPE uint8_t #include "GB_split_sparse_template.c" break ; case 2 : // uint16, int16, or 2-byte user-defined #define GB_CTYPE uint16_t #include "GB_split_sparse_template.c" break ; case 4 : // uint32, int32, float, or 4-byte user-defined #define GB_CTYPE uint32_t #include "GB_split_sparse_template.c" break ; case 8 : // uint64, int64, double, float complex, // or 8-byte user defined #define GB_CTYPE uint64_t #include "GB_split_sparse_template.c" break ; case 16 : // double complex or 16-byte user-defined #define GB_CTYPE uint64_t #undef GB_COPY #define GB_COPY(pC,pA) \ Cx [2*pC ] = Ax [2*pA ] ; \ Cx [2*pC+1] = Ax [2*pA+1] ; #include "GB_split_sparse_template.c" break ; default:; } } #endif if (!done) { // user-defined types #define GB_CTYPE GB_void #undef GB_COPY #define GB_COPY(pC,pA) \ memcpy (Cx + (pC)*asize, Ax + (pA)*asize, asize) ; #include "GB_split_sparse_template.c" } //------------------------------------------------------------------ // free workspace //------------------------------------------------------------------ GB_WERK_POP (C_ek_slicing, int64_t) ; //------------------------------------------------------------------ // advance to the next tile //------------------------------------------------------------------ if (inner < ninner - 1) { int64_t k ; #pragma omp parallel for num_threads(nth) schedule(static) for (k = akstart ; k < akend ; k++) { int64_t ck = k - akstart ; int64_t cknz = Cp [ck+1] - Cp [ck] ; Wp [k] += cknz ; } } //------------------------------------------------------------------ // conform the tile and save it in the Tiles array //------------------------------------------------------------------ C->magic = GB_MAGIC ; ASSERT_MATRIX_OK (C, "C for GB_split", GB0) ; GB_OK (GB_conform (C, Context)) ; if (csc) { GB_TILE (Tiles, inner, outer) = C ; } else { GB_TILE (Tiles, outer, inner) = C ; } C = NULL ; } } GB_FREE_WORK ; return (GrB_SUCCESS) ; }
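/* Editorial sketch of the hybrid tile-boundary search used above: a linear
   scan when the vector slice is short (aknz <= 256 in the file) and a binary
   search otherwise.  It returns the first position p in Ai[p..pend-1] with
   Ai[p] >= iend; find_first_ge is an illustrative name. */
#include <stdint.h>

static int64_t find_first_ge (const int64_t *Ai, int64_t p, int64_t pend,
                              int64_t iend)
{
    if (pend - p <= 256)
    {
        /* short slice: a linear scan beats branch-heavy bisection */
        for ( ; p < pend ; p++)
        {
            if (Ai [p] >= iend) break ;
        }
        return (p) ;
    }
    /* long slice: classic binary search for the partition point */
    int64_t lo = p, hi = pend ;
    while (lo < hi)
    {
        int64_t mid = lo + (hi - lo) / 2 ;
        if (Ai [mid] >= iend) hi = mid ; else lo = mid + 1 ;
    }
    return (lo) ;
}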
test9.c
int g1;

void foo (int a)
{
  0;
  if (1)
  {
    2;
#pragma omp barrier
    3;
  }
  else
  {
    4;
    foo(1);
    5;
  }
}

int main()
{
  int x;
#pragma omp parallel
  {
    x = 101;
    6;
    if (7)
    {
      8;
#pragma omp atomic write
      x = 102;
      foo(9);
      10;
    }
    else
    {
      11;
#pragma omp atomic write
      x = 103;
      x = x;
#pragma omp barrier
      12;
#pragma omp barrier
      13;
    }
    14;
#pragma omp barrier
    15;
  }
}
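/* The test above nests barriers under conditionals inside a parallel region.
   OpenMP requires that a barrier be encountered by all threads of the team
   or by none of them; a conforming two-phase split looks like this
   (editorial sketch): */
void phases (void)
{
    #pragma omp parallel
    {
        /* phase 1: every thread does its share of the work */
        #pragma omp barrier     /* all threads of the team reach this point */
        /* phase 2: safe to read results produced in phase 1 */
    }
}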
rose_jacobi_seq.c
/* An example code * * */ #include <stdio.h> #include <math.h> #include <omp.h> void driver(); void initialize(); void jacobi(); void error_check(); #define MSIZE 200 int n; int m; int mits; double tol; double relax = 1.0; double alpha = 0.0543; double u[200][200]; double f[200][200]; double uold[200][200]; double dx; double dy; int main() { // float toler; /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); scanf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; printf("Input mits - Maximum iterations for solver\n"); scanf("%d",&mits); */ n = 200; m = 200; tol = 0.0000000001; mits = 1000; driver(); return 1; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialzed. * * Working varaibles/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver() { initialize(); /* Solve Helmholtz equation */ jacobi(); /* error_check (n,m,alpha,dx,dy,u,f) */ error_check(); } /* subroutine initialize (n,m,alpha,dx,dy,u,f) ****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize() { int i; int j; int xx; int yy; // double PI = 3.1415926; // -->dx@112:2 dx = 2.0 / (n - 1); //-->dy@113:2 dy = 2.0 / (m - 1); /* Initialize initial condition and RHS */ //#pragma omp parallel for private(i,j,xx,yy) #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy) for (j = 0; j <= m - 1; j += 1) { /* -1 < x < 1 */ xx = ((int )(- 1.0 + dx * (i - 1))); /* -1 < y < 1 */ yy = ((int )(- 1.0 + dy * (j - 1))); u[i][j] = 0.0; f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)); } } } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. 
coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution *****************************************************************/ void jacobi() { double omega; int i; int j; int k; double error; double resid; double ax; double ay; double b; omega = relax; /* * Initialize coefficients */ /* X-direction coef */ ax = 1.0 / (dx * dx); /* Y-direction coef */ ay = 1.0 / (dy * dy); /* Central coeff */ b = - 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha; error = 10.0 * tol; k = 1; while(k <= mits && error > tol){ error = 0.0; /* Copy new solution into old */ //#pragma omp parallel { //#pragma omp for private(i,j) #pragma omp parallel for private (i,j) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= m - 1; j += 1) { uold[i][j] = u[i][j]; } } //#pragma omp for private(i,j,resid) reduction(+:error) nowait #pragma omp parallel for private (resid,i,j) reduction (+:error) for (i = 1; i <= n - 1 - 1; i += 1) { #pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b) for (j = 1; j <= m - 1 - 1; j += 1) { resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b; u[i][j] = uold[i][j] - omega * resid; error = error + resid * resid; } } } /* omp end parallel */ /* Error check */ // k = k + 1; error = sqrt(error) / (n * m); /* End iteration loop */ } printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n",error); } void error_check() { int i; int j; double xx; double yy; double temp; double error; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); error = 0.0; //#pragma omp parallel for private(i,j,xx,yy,temp) reduction(+:error) #pragma omp parallel for private (xx,yy,temp,i,j) reduction (+:error) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (xx,yy,temp,j) reduction (+:error) firstprivate (dx,dy) for (j = 0; j <= m - 1; j += 1) { xx = - 1.0 + dx * (i - 1); yy = - 1.0 + dy * (j - 1); temp = u[i][j] - (1.0 - xx * xx) * (1.0 - yy * yy); error = error + temp * temp; } } error = sqrt(error) / (n * m); printf("Solution Error :%E \n",error); }
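/* Editorial note: the generated code above nests one "omp parallel for"
   inside another; with nested parallelism disabled (the common default) each
   inner region runs on a team of one thread, so only the outer loop is
   actually parallel.  A flat alternative for the update sweep, reusing the
   file's globals u, uold, f, n, m (jacobi_sweep is an illustrative name): */
static double jacobi_sweep (double omega, double ax, double ay, double b)
{
    int i, j;
    double resid;
    double error = 0.0;
    #pragma omp parallel for collapse(2) private(resid) reduction(+: error)
    for (i = 1; i <= n - 2; i += 1)
        for (j = 1; j <= m - 2; j += 1) {
            resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                   + ay * (uold[i][j - 1] + uold[i][j + 1])
                   + b * uold[i][j] - f[i][j]) / b;
            u[i][j] = uold[i][j] - omega * resid;
            error = error + resid * resid;
        }
    return error;
}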
flush-2.c
int a, b;

void
foo (void)
{
  #pragma omp flush
  #pragma omp flush (a, b)
  #pragma omp flush acquire
  #pragma omp flush release
  #pragma omp flush acq_rel
  #pragma omp flush relaxed	/* { dg-error "expected 'acq_rel', 'release' or 'acquire'" } */
  #pragma omp flush seq_cst	/* { dg-error "expected 'acq_rel', 'release' or 'acquire'" } */
  #pragma omp flush foobar	/* { dg-error "expected 'acq_rel', 'release' or 'acquire'" } */
  #pragma omp flush acquire (a, b)	/* { dg-error "'flush' list specified together with memory order clause" } */
  #pragma omp flush release (a, b)	/* { dg-error "'flush' list specified together with memory order clause" } */
  #pragma omp flush acq_rel (a, b)	/* { dg-error "'flush' list specified together with memory order clause" } */
}
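/* The diagnostics above enforce one rule: a flush list cannot be combined
   with a memory-order clause.  Listed flushes appear in the classic
   hand-rolled handshake; an editorial sketch of the producer side (modern
   code would use atomics with acquire/release instead): */
int data, flag;

void produce (void)
{
    data = 42;
    #pragma omp flush (data)    /* make data visible before raising flag */
    flag = 1;
    #pragma omp flush (flag)
}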
sptensor.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include "sptensor.h" #include <stdlib.h> #include <string.h> /** * Create a new sparse tensor * @param tsr a pointer to an uninitialized sparse tensor * @param nmodes number of modes the tensor will have * @param ndims the dimension of each mode the tensor will have */ int sptNewSparseTensor(sptSparseTensor *tsr, sptIndex nmodes, const sptIndex ndims[]) { sptIndex i; int result; tsr->nmodes = nmodes; tsr->sortorder = malloc(nmodes * sizeof tsr->sortorder[0]); for(i = 0; i < nmodes; ++i) { tsr->sortorder[i] = i; } tsr->ndims = malloc(nmodes * sizeof *tsr->ndims); spt_CheckOSError(!tsr->ndims, "SpTns New"); memcpy(tsr->ndims, ndims, nmodes * sizeof *tsr->ndims); tsr->nnz = 0; tsr->inds = malloc(nmodes * sizeof *tsr->inds); spt_CheckOSError(!tsr->inds, "SpTns New"); for(i = 0; i < nmodes; ++i) { result = sptNewIndexVector(&tsr->inds[i], 0, 0); spt_CheckError(result, "SpTns New", NULL); } result = sptNewValueVector(&tsr->values, 0, 0); spt_CheckError(result, "SpTns New", NULL); return 0; } /** * Copy a sparse tensor * @param[out] dest a pointer to an uninitialized sparse tensor * @param[in] src a pointer to a valid sparse tensor */ int sptCopySparseTensor(sptSparseTensor *dest, const sptSparseTensor *src, int const nt) { sptIndex i; int result; dest->nmodes = src->nmodes; dest->sortorder = malloc(src->nmodes * sizeof src->sortorder[0]); memcpy(dest->sortorder, src->sortorder, src->nmodes * sizeof src->sortorder[0]); dest->ndims = malloc(dest->nmodes * sizeof *dest->ndims); spt_CheckOSError(!dest->ndims, "SpTns Copy"); memcpy(dest->ndims, src->ndims, src->nmodes * sizeof *src->ndims); dest->nnz = src->nnz; dest->inds = malloc(dest->nmodes * sizeof *dest->inds); spt_CheckOSError(!dest->inds, "SpTns Copy"); for(i = 0; i < dest->nmodes; ++i) { result = sptCopyIndexVector(&dest->inds[i], &src->inds[i], nt); spt_CheckError(result, "SpTns Copy", NULL); } result = sptCopyValueVector(&dest->values, &src->values, nt); spt_CheckError(result, "SpTns Copy", NULL); return 0; } /** * Release any memory the sparse tensor is holding * @param tsr the tensor to release */ void sptFreeSparseTensor(sptSparseTensor *tsr) { sptIndex i; for(i = 0; i < tsr->nmodes; ++i) { sptFreeIndexVector(&tsr->inds[i]); } free(tsr->sortorder); free(tsr->ndims); free(tsr->inds); sptFreeValueVector(&tsr->values); tsr->nmodes = 0; } double SparseTensorFrobeniusNormSquared(sptSparseTensor const * const spten) { double norm = 0; sptValue const * const restrict vals = spten->values.data; #ifdef PARTI_USE_OPENMP #pragma omp parallel for reduction(+:norm) #endif for(sptNnzIndex n=0; n < spten->nnz; ++n) { norm += vals[n] * vals[n]; } return norm; } int spt_DistSparseTensor(sptSparseTensor * tsr, int const nthreads, sptNnzIndex * const dist_nnzs, sptIndex * dist_nrows) { sptNnzIndex global_nnz = tsr->nnz; sptNnzIndex aver_nnz = global_nnz / nthreads; memset(dist_nnzs, 0, 
nthreads*sizeof(sptNnzIndex)); memset(dist_nrows, 0, nthreads*sizeof(sptIndex)); sptSparseTensorSortIndex(tsr, 0, nthreads); sptIndex * ind0 = tsr->inds[0].data; int ti = 0; dist_nnzs[0] = 1; dist_nrows[0] = 1; for(sptNnzIndex x=1; x<global_nnz; ++x) { if(ind0[x] == ind0[x-1]) { ++ dist_nnzs[ti]; } else if (ind0[x] > ind0[x-1]) { if(dist_nnzs[ti] < aver_nnz || ti == nthreads-1) { ++ dist_nnzs[ti]; ++ dist_nrows[ti]; } else { ++ ti; ++ dist_nnzs[ti]; ++ dist_nrows[ti]; } } else { spt_CheckError(SPTERR_VALUE_ERROR, "SpTns Dist", "tensor unsorted on mode-0"); } } return 0; } int spt_DistSparseTensorFixed(sptSparseTensor * tsr, int const nthreads, sptNnzIndex * const dist_nnzs) { sptNnzIndex global_nnz = tsr->nnz; sptNnzIndex aver_nnz = global_nnz / nthreads; memset(dist_nnzs, 0, nthreads*sizeof(sptNnzIndex)); sptSparseTensorSortIndex(tsr, 0, nthreads); sptIndex * ind0 = tsr->inds[0].data; int ti = 0; dist_nnzs[0] = 1; for(sptNnzIndex x=1; x<global_nnz; ++x) { if(ind0[x] == ind0[x-1]) { ++ dist_nnzs[ti]; } else if (ind0[x] > ind0[x-1]) { if(dist_nnzs[ti] < aver_nnz || ti == nthreads-1) { ++ dist_nnzs[ti]; } else { ++ ti; ++ dist_nnzs[ti]; } } else { spt_CheckError(SPTERR_VALUE_ERROR, "SpTns Dist", "tensor unsorted on mode-0"); } } return 0; } /** * Shuffle all indices. * * @param[in] tsr tensor to be shuffled * @param[out] map_inds is the renumbering mapping * */ void sptSparseTensorShuffleIndices(sptSparseTensor *tsr, sptIndex ** map_inds) { /* Renumber nonzero elements */ sptIndex tmp_ind; for(sptNnzIndex z = 0; z < tsr->nnz; ++z) { for(sptIndex m = 0; m < tsr->nmodes; ++m) { tmp_ind = tsr->inds[m].data[z]; tsr->inds[m].data[z] = map_inds[m][tmp_ind]; } } } /** * Inverse-Shuffle all indices. * * @param[in] tsr tensor to be shuffled * @param[out] map_inds is the renumbering mapping * */ void sptSparseTensorInvMap(sptSparseTensor *tsr, sptIndex ** map_inds) { sptIndex ** tmp_map_inds = (sptIndex **)malloc(tsr->nmodes * sizeof(sptIndex*)); // spt_CheckOSError(!tmp_map_inds, "sptSparseTensorInvMap"); for(sptIndex m = 0; m < tsr->nmodes; ++m) { tmp_map_inds[m] = (sptIndex *)malloc(tsr->ndims[m] * sizeof (sptIndex)); // spt_CheckError(!tmp_map_inds[m], "sptSparseTensorInvMap", NULL); } sptIndex tmp_ind; for(sptIndex m = 0; m < tsr->nmodes; ++m) { sptIndex loc = 0; for(sptNnzIndex i = 0; i < tsr->ndims[m]; ++i) { tmp_ind = map_inds[m][i]; tmp_map_inds[m][tmp_ind] = loc; ++ loc; } } for(sptIndex m = 0; m < tsr->nmodes; ++m) { for(sptNnzIndex i = 0; i < tsr->ndims[m]; ++i) { map_inds[m][i] = tmp_map_inds[m][i]; } } for(sptIndex m = 0; m < tsr->nmodes; ++m) { free(tmp_map_inds[m]); } free(tmp_map_inds); }
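/* Editorial usage sketch for the constructors above: create a 3-mode tensor,
   copy it, take the squared Frobenius norm, and release both.  Populating
   nonzeros is elided since the append API is not shown in this file. */
#include <ParTI.h>

int sptensor_demo (void)
{
    sptSparseTensor tsr, copy;
    sptIndex ndims [3] = { 4, 5, 6 };
    if (sptNewSparseTensor (&tsr, 3, ndims) != 0) return -1;
    /* ... fill tsr.inds[m] and tsr.values here ... */
    if (sptCopySparseTensor (&copy, &tsr, 1) != 0) return -1;
    double sq = SparseTensorFrobeniusNormSquared (&copy);
    (void) sq;                   /* squared norm; take sqrt for the norm */
    sptFreeSparseTensor (&copy);
    sptFreeSparseTensor (&tsr);
    return 0;
}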
convolution_winograd_dot_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_winograd_dot_pack4_fp16sa_neon(Mat& bottom_blob_tm, int outch, const Mat& kernel_tm, Mat& top_blob_tm, const Option& opt) { // Mat bottom_blob_tm(tiles, 16/36/64, inch, 8u, 4, opt.workspace_allocator); const int tiles = bottom_blob_tm.w; const int batch = bottom_blob_tm.h; const int inch = bottom_blob_tm.c; // permute Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, batch, 8u, 4, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, batch, 8u, 4, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, batch, 8u, 4, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < batch; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n" "st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, batch, outch, 8u, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < batch; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = 
kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v28.8h, v4.8h, v0.h[4] \n" "fmla v29.8h, v4.8h, v0.h[5] \n" "fmla v30.8h, v4.8h, v0.h[6] \n" "fmla v31.8h, v4.8h, v0.h[7] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v28.8h, v5.8h, v1.h[4] \n" "fmla v29.8h, v5.8h, v1.h[5] \n" "fmla v30.8h, v5.8h, v1.h[6] \n" "fmla v31.8h, v5.8h, v1.h[7] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "fmla v28.8h, v6.8h, v2.h[4] \n" "fmla v29.8h, v6.8h, v2.h[5] \n" "fmla v30.8h, v6.8h, v2.h[6] \n" "fmla v31.8h, v6.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "fmla v28.8h, v7.8h, v3.h[4] \n" "fmla v29.8h, v7.8h, v3.h[5] \n" "fmla v30.8h, v7.8h, v3.h[6] \n" "fmla v31.8h, v7.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "bne 
0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16(0.f); for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); _sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3); kptr += 32; r0 += 4; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < batch; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v28.4h, v4.4h, v0.h[4] \n" "fmla v29.4h, v4.4h, v0.h[5] \n" "fmla v30.4h, v4.4h, v0.h[6] \n" "fmla v31.4h, v4.4h, v0.h[7] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v28.4h, v5.4h, v1.h[4] \n" "fmla v29.4h, v5.4h, v1.h[5] \n" "fmla v30.4h, v5.4h, v1.h[6] \n" "fmla v31.4h, v5.4h, v1.h[7] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "fmla v28.4h, v6.4h, v2.h[4] \n" "fmla v29.4h, v6.4h, v2.h[5] \n" "fmla v30.4h, v6.4h, v2.h[6] \n" "fmla v31.4h, v6.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "fmla v28.4h, v7.4h, v3.h[4] \n" "fmla v29.4h, v7.4h, v3.h[5] \n" "fmla v30.4h, v7.4h, v3.h[6] \n" "fmla v31.4h, v7.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", 
"memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16(0.f); for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); _sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3); kptr += 16; r0 += 4; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 24; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); 
ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff); /* the macro is MIN; lowercase min() is undeclared */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  /* also release the top-level pointer arrays themselves */
  free(A);
  free(coef);

  return 0;
}
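The kernel above advances the field with two alternating planes, writing A[(t+1)%2] while reading A[t%2], so no cell is ever updated in place within a time step. A minimal self-contained sketch of the same ping-pong pattern, shrunk to a 1-D 3-point stencil (the sizes and coefficients here are illustrative, not the benchmark's):

/* Double-buffer time stepping as used above, reduced to 1-D.
 * NX, NT and the weights are made up for the example. */
#include <stdio.h>

#define NX 16
#define NT 4

int main(void) {
    double a[2][NX];
    for (int k = 0; k < NX; k++) a[0][k] = (double)(k % 5);

    for (int t = 0; t < NT; t++) {
        /* read plane t%2, write plane (t+1)%2 */
        for (int k = 1; k < NX - 1; k++)
            a[(t+1)%2][k] = 0.5  * a[t%2][k]
                          + 0.25 * a[t%2][k-1]
                          + 0.25 * a[t%2][k+1];
        /* boundary points are carried over unchanged */
        a[(t+1)%2][0]    = a[t%2][0];
        a[(t+1)%2][NX-1] = a[t%2][NX-1];
    }

    /* after NT steps the newest data sits in plane NT%2 */
    printf("a[NT%%2][NX/2] = %f\n", a[NT%2][NX/2]);
    return 0;
}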
image_manipulation.h
////////////////////////////////////////////////////////////////////////// // Software License Agreement (BSD License) // // // // Copyright (c) 2009 // // Engin Tola // // web : http://cvlab.epfl.ch/~tola // // email : engin.tola@epfl.ch // // // // All rights reserved. // // // // Redistribution and use in source and binary forms, with or without // // modification, are permitted provided that the following conditions // // are met: // // // // * Redistributions of source code must retain the above copyright // // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // // copyright notice, this list of conditions and the following // // disclaimer in the documentation and/or other materials provided // // with the distribution. // // * Neither the name of the EPFL nor the names of its // // contributors may be used to endorse or promote products derived // // from this software without specific prior written permission. // // // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // // POSSIBILITY OF SUCH DAMAGE. 
//                                                                      //
//                                                                      //
// See licence.txt file for more details                                //
//////////////////////////////////////////////////////////////////////////

#ifndef KUTILITY_IMAGE_MANIPULATION_H
#define KUTILITY_IMAGE_MANIPULATION_H

#include "kutility/kutility.def"
#include "kutility/general.h"

namespace kutility
{
   template<typename T1, typename T2>
   void scale( T1* src, int h, int w, float sc, T2* dst, int dh, int dw )
   {
      int nh = int( h*sc );
      int nw = int( w*sc );

      assert( dst != NULL );
      assert( nh == dh );
      assert( nw == dw );

      if( sc == 1 ) {
         for( int i=0; i<h*w; i++ ) dst[i] = (T2)src[i];
         return;
      }

      double scale_factor = 1.0 / sc;
      memset(dst, 0, sizeof(T2)*dh*dw );

      float y,x;
      for( int ny=0; ny<nh; ny++ ) {
         y = ny * scale_factor;
         if( y >= h-1 ) continue;
         for( int nx=0; nx<nw; nx++ ) {
            x = nx * scale_factor;
            if( x >= w-1 ) continue;
            dst[ny*nw+nx] = (T2)bilinear_interpolation(src, w, x, y);
         }
      }
   }

   template<class T> inline
   void rgb_to_y(T* cim, int h, int w, T* gim )
   {
      assert( (gim!=NULL) && (cim!=NULL) );
      for( int y=0; y<h; y++ ) {
         for( int x=0; x<w; x++ ) {
            int index=y*w+x;
            float r=cim[3*index  ];
            float g=cim[3*index+1];
            float b=cim[3*index+2];
            gim[index] = T( 0.299*r + 0.587*g + 0.114*b );
         }
      }
   }

   template<class T> inline
   void y_to_rgb(T* yim, int h, int w, T* rgbim )
   {
      assert( rgbim != NULL );
      int wh = w*h;
      for( int k=0; k<wh; k++ ) {
         rgbim[ 3*k   ] = yim[k];
         rgbim[ 3*k+1 ] = yim[k];
         rgbim[ 3*k+2 ] = yim[k];
      }
   }

   // Reads from rgb and writes the channel-swapped result into bgr.
   // (The original body read from the uninitialized output buffer and
   // wrote into the input; source and destination are fixed here.)
   template<class T> inline
   void rgb_to_bgr(T* rgb, int h, int w, T* bgr )
   {
      assert( bgr != NULL );
      int wh3 = w*h*3;
      for( int k=0; k<wh3; k+=3 ) {
         bgr[ k   ] = rgb[ k+2 ];
         bgr[ k+1 ] = rgb[ k+1 ];
         bgr[ k+2 ] = rgb[ k   ];
      }
   }

   // The swap is its own inverse, so the reverse conversion reuses it.
   template<class T> inline
   void bgr_to_rgb(T* bgr, int h, int w, T* rgb )
   {
      rgb_to_bgr(bgr,h,w,rgb);
   }

   template<class T> inline
   void rgba_to_y(T* cim, int h, int w, T* gim )
   {
      assert( (gim!=NULL) && (cim!=NULL) );
      for( int y=0; y<h; y++ ) {
         for( int x=0; x<w; x++ ) {
            int index=y*w+x;
            float r=cim[4*index  ];
            float g=cim[4*index+1];
            float b=cim[4*index+2];
            gim[index] = T( 0.299*r + 0.587*g + 0.114*b );
         }
      }
   }

   template<class T> inline
   void rgba_to_rgb(T* rgbaim, int h, int w, T* rgbim )
   {
      assert( (rgbim!=NULL) && (rgbaim!=NULL) );
      int wh = w*h;
      for( int k=0; k<wh; k++ ) {
         rgbim[3*k  ] = rgbaim[4*k  ];
         rgbim[3*k+1] = rgbaim[4*k+1];
         rgbim[3*k+2] = rgbaim[4*k+2];
      }
   }

   uchar* clean_image   (uchar * &image, int w, int h, bool in_place=false);
   uchar* apply_erosion (uchar * &image, int w, int h, bool in_place=false);
   uchar* apply_dilation(uchar * &image, int w, int h, bool in_place=false);
   uchar* down_sample   (uchar *  image, int w, int h);
   uchar* resize_image( uchar* &image, int h, int w, int nh, int nw, bool in_place=false);

   /// scales the image intensity between a lower "il" and an upper
   /// "iu" value. "sz" is the image size.
   /// by default il=0 and iu=1;
   double* scale_intensity( uchar* image, int sz, double il=0, double iu=1);

   template<class T>
   void decompose_channels( T* image, int h, int w, T* &ch_0, T* &ch_1, T* &ch_2)
   {
      int image_size = h*w;
      // allocate with the element type T (the original hardwired uchar)
      ch_0 = kutility::allocate<T>(image_size);
      ch_1 = kutility::allocate<T>(image_size);
      ch_2 = kutility::allocate<T>(image_size);

#if defined(WITH_OPENMP)
#pragma omp parallel for
#endif
      for( int y=0; y<h; y++ ) {
         int yw = y*w;
         for( int x=0; x<w; x++ ) {
            int index  = yw+x;
            int cindex = 3*index;
            // read the interleaved pixel at cindex (the original read at
            // index and never used cindex)
            ch_0[index] = image[cindex  ];
            ch_1[index] = image[cindex+1];
            ch_2[index] = image[cindex+2];
         }
      }
   }

   /// applies gamma correction
   template<class T> inline
   T* gamma_correction( T* im, int h, int w, double gamma, bool in_place=false)
   {
      int sz = w*h;
      T* out;
      if( !in_place ) out = kutility::allocate<T>(sz);
      else            out = im;

      double val;
      for( int i=0; i<sz; i++ ) {
         val = pow( (double)im[i], gamma );
         if( val > 255 ) out[i] = (T)255;
         else            out[i] = (T)val;
      }
      return out;
   }

   /// adds some noise to the pixels
   template<class T> inline
   T* add_noise( T* im, int h, int w, int noise_level, bool in_place=false)
   {
      int sz = w*h;
      T* out;
      if( !in_place ) out = kutility::allocate<T>(sz);
      else            out = im;

      for( int i=0; i<sz; i++ ) {
         int sign = 1;
         if( rand()/(double)RAND_MAX < 0.5 ) sign = -1;
         out[i] = im[i] + sign * rand()/(double)RAND_MAX * noise_level;
      }
      return out;
   }
}
#endif
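scale() above delegates the actual resampling to a bilinear_interpolation(src, w, x, y) helper declared elsewhere in kutility; its definition is not part of this header. The following is only the standard bilinear formula such a helper computes, written for a single-channel 8-bit image. Note that the guards x < w-1 and y < h-1 in scale() are exactly what keep the +1 neighbor accesses below in bounds:

/* Sketch of the bilinear interpolation that scale() relies on -- the
 * standard formula, not kutility's actual code. */
static double bilinear_interpolation(const unsigned char* src, int w,
                                     double x, double y)
{
    int x0 = (int)x, y0 = (int)y;      /* top-left corner of the cell */
    double fx = x - x0, fy = y - y0;   /* fractional offsets in [0,1) */

    double p00 = src[ y0   *w + x0  ];
    double p01 = src[ y0   *w + x0+1];
    double p10 = src[(y0+1)*w + x0  ];
    double p11 = src[(y0+1)*w + x0+1];

    /* interpolate along x on both rows, then blend along y */
    return (1-fy) * ((1-fx)*p00 + fx*p01)
         +    fy  * ((1-fx)*p10 + fx*p11);
}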
NLmean_propag1dir_sspacing6_tspacing4_sim12_acc12_neighbor5_tau0100.c
/* * compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++ * in the terminal: export OMP_NUM_THREADS=3 */ #include<stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <netcdf.h> #include <omp.h> /* This is the name of the data file we will read. */ #define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing6.nc" #define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag1dir_sspacing6_tspacing4_sim12_acc12_neighbor5_tau0100.nc" /* all constants */ #define N_HR 96 #define SCALE_FACTOR_SPACE 6 #define SCALE_FACTOR_TIME 4 #define SIM_HAFTSIZE 12 #define ACC_HAFTSIZE 12 #define NEIGHBOR_HAFTSIZE 5 #define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1) #define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1) #define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1) #define TAU 0.1 #define NUM_VARS 1 #define NUM_SCALES 2 #define NUM_3DSNAPS 37 /* #3D snapshots */ #define NUM_BLOCKS N_HR/SCALE_FACTOR_TIME - 1 /* #(1:SCALE_FACTOR_TIME:N_HR) - 1*/ #define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */ #define NDIMS 4 /* Handle errors by printing an error message and exiting with a non-zero status. */ #define ERRCODE 2 #define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);} /* **********************************************************************************/ /* ****************************** USEFUL FUNCTIONS **********************************/ /* **********************************************************************************/ /* * get_onesnap: take part of a big array(arr1) and put to small one (arr2): arr2 = arr1[id_start:id_end] */ void get_onesnap(double *arr1,double *arr2, int id_start, int id_end) { for (int i = id_start; i < id_end + 1; i++) arr2[i - id_start] = arr1[i]; } /* * put_onesnap: assign small array (arr2) into biger one (arr1): arr1[id_start:id_end] = arr2 */ void put_onesnap(double *arr1,double *arr2, int id_start, int id_end) { for (int i = id_start; i < id_end + 1; i++) arr1[i] = arr2[i - id_start]; } /* * norm_by_weight: normalize x[dim] by weight W[dim] */ void norm_by_weight(int dim, double *x, double *W) { for (int k = 0; k < dim; k++) x[k] = x[k]/W[k]; } void add_mat(int dim, double *sum, double *x1, double *x2) { for (int k = 0; k < dim; k++) sum[k] = x1[k] + x2[k]; } void initialize(int dim, double *x, double val) { for (int k = 0; k < dim; k++) x[k] = val; } /* **********************************************************************************/ /* ****************************** NETCDF UTILS **************************************/ /* **********************************************************************************/ /* * creat_netcdf: create the netcdf file [filename] contain [num_vars] variables * variable names are [varname] */ void create_netcdf(char *filename, int num_vars, char *varname[num_vars]) { int ncid_wr, retval_wr; int vel_varid_wr; int Nt, Nx, Ny, Nz; int dimids[NDIMS]; /* Create the file. */ if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr))) ERR(retval_wr); /* Define the dimensions. The record dimension is defined to have * unlimited length - it can grow as needed.*/ if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny))) ERR(retval_wr); if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz))) ERR(retval_wr); if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt))) ERR(retval_wr); /* Define the netCDF variables for the data. 
*/
   /* The original code never defined the "Nx" dimension, leaving dimids[1]
    * uninitialized; define it here. Its length matches count_ids[1] in main(). */
   if ((retval_wr = nc_def_dim(ncid_wr, "Nx", NUM_2DSNAPS, &Nx)))
      ERR(retval_wr);

   dimids[0] = Nt;
   dimids[1] = Nx;
   dimids[2] = Ny;
   dimids[3] = Nz;

   for (int i = 0; i < num_vars; i++) {
      if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
         ERR(retval_wr);
   }

   /* End define mode (SHOULD NOT FORGET THIS!). */
   if ((retval_wr = nc_enddef(ncid_wr)))
      ERR(retval_wr);

   /* Close the file. */
   if ((retval_wr = nc_close(ncid_wr)))
      ERR(retval_wr);

   printf("\n *** SUCCESS creating file: %s!\n", filename);
}

/*
 * write_netcdf:
 * write into [filename], variable [varname] [snap_end - snap_start + 1] snapshots [snaps] started at [snap_start]
 */
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
   int ncid_wr, retval_wr;
   int vel_varid_wr;

   /* Open the file. NC_WRITE tells netCDF we want write access to the file. */
   if ((retval_wr = nc_open(filename, NC_WRITE, &ncid_wr)))
      ERR(retval_wr);

   /* Get variable */
   if ((retval_wr = nc_inq_varid(ncid_wr, varname, &vel_varid_wr)))
      ERR(retval_wr);

   /* Put variable */
   if ((retval_wr = nc_put_vara_double(ncid_wr, vel_varid_wr, start, count, &snaps[0])))
      ERR(retval_wr);

   /* Close the file. */
   if ((retval_wr = nc_close(ncid_wr)))
      ERR(retval_wr);

   printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}

/*
 * read_netcdf: read from [filename], variable [varname] [snap_end - snap_start + 1] snapshots [snaps]
 * started at [snap_start]
 */
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
   int ncid_rd, retval_rd;
   int vel_varid_rd;

   /* ******** PREPARE TO READ ************* */
   /* Open the file. NC_NOWRITE tells netCDF we want read-only access to the file. */
   if ((retval_rd = nc_open(filename, NC_NOWRITE, &ncid_rd)))
      ERR(retval_rd);

   /* Get the varids of the velocity in netCDF */
   if ((retval_rd = nc_inq_varid(ncid_rd, varname, &vel_varid_rd)))
      ERR(retval_rd);

   if ((retval_rd = nc_get_vara_double(ncid_rd, vel_varid_rd, start, count, &snaps[0])))
      ERR(retval_rd);

   /* Close the file, freeing all resources.
*/ if ((retval_rd = nc_close(ncid_rd))) ERR(retval_rd); printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename); } /* **********************************************************************************/ /* ****************************** ESTIMATE_DISTANCE *********************************/ /* **********************************************************************************/ /* * estimate_distance: estimate the distances between ref patch and moving patches (prev and after) * patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1) * reference patch are centered at [center_ref_idy, center_ref_idz] * moving patches are centered at [center_moving_idy, center_moving_idz] * dist_all contain 2 elements: distances to moving patches in the prev and after plane * x_ref: reference plane * x_prev: previous plane * x_after: plane after * ref_ids_y(z): indices of points in reference patch * moving_ids_y(z): indices of points in moving patch */ void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids) { int neighbor_id, sim_id; int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE]; for (int m = 0; m < NEIGHBOR_FULLSIZE; m++) { for (int n = 0; n < NEIGHBOR_FULLSIZE; n++) { gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE; gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE; } } int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE]; for (int p = 0; p < SIM_FULLSIZE; p++) { for (int q = 0; q < SIM_FULLSIZE; q++) { gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE; gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE; } } int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE]; for (int p = 0; p < SIM_FULLSIZE; p++) for (int q = 0; q < SIM_FULLSIZE; q++) grid_sim[p][q] = p * SIM_FULLSIZE + q; for (int p = 0; p < ACC_FULLSIZE; p++) for (int q = 0; q < ACC_FULLSIZE; q++) acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q]; int valy, valz; long int grid_id; for (int i = 0; i < N_HR; i++) { for (int j = 0; j < N_HR; j++) { for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++) { for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++) { grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id; valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id]; valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id]; if (valy < 0) gridpatches_y[grid_id] = (N_HR - 1) + valy; else if (valy > (N_HR - 1)) gridpatches_y[grid_id] = valy - (N_HR - 1); else gridpatches_y[grid_id] = valy; if (valz < 0) gridpatches_z[grid_id] = (N_HR - 1) + valz; else if (valz > (N_HR - 1)) gridpatches_z[grid_id] = valz - (N_HR - 1); else gridpatches_z[grid_id] = valz; } } } } //printf("\n gridpatches_z: %i \n", gridpatches_y[0]); } /* **********************************************************************************/ /* ****************************** NLMEAN *********************************/ /* **********************************************************************************/ /* * estimate_distance: estimate the distances between ref patch and moving patches (prev and after) * patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1) * reference 
patch are centered at [center_ref_idy, center_ref_idz] * moving patches are centered at [center_moving_idy, center_moving_idz] * dist_all contain 2 elements: distances to moving patches in the prev and after plane * x_ref: reference plane * x_prev: previous plane * x_after: plane after * ref_ids_y(z): indices of points in reference patch * moving_ids_y(z): indices of points in moving patch */ /*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE], int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE], int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/ void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids) { double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE)); int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE; int est_idy; #pragma omp parallel for private (est_idy) for (est_idy = 0; est_idy < N_HR; est_idy++) for (int est_idz = 0; est_idz < N_HR; est_idz++) for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++) { int ref_idy, ref_idz, moving_idy, moving_idz; double du; double d = 0.0; long int grid_rid, grid_nid; for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++) { grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ; grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si; ref_idy = gridy[grid_rid]; moving_idy = gridy[grid_nid]; ref_idz = gridz[grid_rid]; moving_idz = gridz[grid_nid]; //compute distance btw reference patch and fusion patch du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz]; d = d + norm_fact*du*du; } double w = exp(-d/(2.0*TAU*TAU)); for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++) { int ai = accids[k]; grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ; grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai; ref_idy = gridy[grid_rid]; moving_idy = gridy[grid_nid]; ref_idz = gridz[grid_rid]; moving_idz = gridz[grid_nid]; x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz]; weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w; } //printf("\n w=%f\n ",w); } } void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset) { for (int t_est = t_first + 1; t_est <= t_bound1; t_est++) { int t_prev = t_est - 1; double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR]; get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); 
get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); //Initialize with zeros initialize(N_HR * N_HR, xref_hf, 0.0); initialize(N_HR * N_HR, w, 0.0); // Propagation from previous planes NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref_hf, w); put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1); } } void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset) { for (int t_est = t_last - 1; t_est >= t_bound2; --t_est) { int t_prev = t_est + 1; double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR]; get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); //Initialize with zeros initialize(N_HR * N_HR, xref_hf, 0.0); initialize(N_HR * N_HR, w, 0.0); // Propagation from previous planes NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref_hf, w); put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1); } } void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset) { double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR]; int t_prev = t_mid - 1; int t_after = t_mid + 1; //Initialize with zeros initialize(N_HR * N_HR, xref_hf, 0.0); initialize(N_HR * N_HR, w, 0.0); get_onesnap(Xlf, xref_lf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids); get_onesnap(Xlf, xmov_lf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1); NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref_hf, w); put_onesnap(Xrec, xref_hf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1); } void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset) { double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR]; double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR]; int tc = (int)SCALE_FACTOR_TIME/2; if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; } for (int td = 1; td < tc; td++) { int t1 = t_first + td; // bound on left side int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side // Initialize with zeros initialize(N_HR * N_HR, xref1_hf, 0.0); initialize(N_HR * N_HR, w1, 0.0); initialize(N_HR * N_HR, xref2_hf, 0.0); initialize(N_HR * N_HR, w2, 0.0); get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + 
(t2 + 1) * N_HR * N_HR - 1); //Propagate from left bound get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids); //Propagate from right bound get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref1_hf, w1); put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1); norm_by_weight(N_HR*N_HR, xref2_hf, w2); put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1); } // Last plane in the center if (SCALE_FACTOR_TIME % 2 == 0) { initialize(N_HR * N_HR, xref1_hf, 0.0); initialize(N_HR * N_HR, w1, 0.0); get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); norm_by_weight(N_HR*N_HR, xref1_hf, w1); put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1); } } /* **********************************************************************************/ /* ********************************** MAIN FUNCTION *********************************/ /* **********************************************************************************/ int main() { /* Creat the file to save results */ char *varnames[NUM_VARS] = {"x_rec_all"}; create_netcdf(FILENAME_WR, NUM_VARS, varnames); /* Allocate memory */ double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double)); double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double)); double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double)); /* read all snapshots */ size_t start_ids[4] = {0, 0, 0, 0}; size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR }; read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all); read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all); double time_all_start = omp_get_wtime(); double *x_current_lf = (double*)malloc(N_HR * N_HR * sizeof(double)); double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double)); double *x_rec = (double*)malloc(N_HR * N_HR * sizeof(double)); long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE; int *gridpatches_y = (int*)malloc(grid_size * sizeof(int)); int *gridpatches_z = 
(int*)malloc(grid_size * sizeof(int)); int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int)); generate_grids(gridpatches_y, gridpatches_z, acc_ids); for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++) { int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR; // put first PIV get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1); put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1); int block_id; for(block_id = 0; block_id < NUM_BLOCKS; block_id++) { double time_start = omp_get_wtime(); int t_first = SCALE_FACTOR_TIME*block_id; int t_last = SCALE_FACTOR_TIME*(block_id+1); // Put last PIV of the block get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1); put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1); if (SCALE_FACTOR_TIME % 2) { int t_bound1 = t_first + (int)SCALE_FACTOR_TIME/2; int t_bound2 = t_bound1 + 1; propag_forward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_bound1, t_offset); propag_backward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_last, t_bound2, t_offset); } else { int t_mid = t_first + (int)SCALE_FACTOR_TIME/2; int t_bound1 = t_mid - 1; int t_bound2 = t_mid + 1; propag_forward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_bound1, t_offset); propag_backward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_last, t_bound2, t_offset); propag_2planes(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_mid, t_offset); printf("\n Estimated block %i (total 23) in 3D snapshot %i (total 37) in %f seconds \n", block_id, snap3d_id, (double)omp_get_wtime() - time_start); } } } // Write to file write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all); /* free memory */ free(x_rec); free(x_current_lf); free(x_current_hf); free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all); free(gridpatches_y); free(gridpatches_z); free(acc_ids); printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start); return 1; }
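Stripped of the grid bookkeeping, NLmean() computes for every candidate patch a mean squared distance d to the reference patch, converts it to a weight w = exp(-d/(2*TAU^2)), and accumulates weighted values plus weights that norm_by_weight() later divides out. A toy 1-D version of that core, with made-up sizes and data:

/* Toy NL-means core: weight each candidate patch by its similarity to the
 * reference patch, then normalize. Signal and sizes are illustrative. */
#include <math.h>
#include <stdio.h>

#define LEN   8
#define HALF  1          /* patch half-size, i.e. 3-point patches */
#define TAU_  0.1

int main(void) {
    double x[LEN] = {0.1, 0.2, 0.15, 0.8, 0.82, 0.79, 0.2, 0.1};
    int ref = 4;                                /* position to estimate */
    double acc = 0.0, wsum = 0.0;

    for (int c = HALF; c < LEN - HALF; c++) {   /* candidate patch centers */
        double d = 0.0;
        for (int o = -HALF; o <= HALF; o++) {   /* mean squared patch distance */
            double du = x[ref + o] - x[c + o];
            d += du * du / (2*HALF + 1);
        }
        double w = exp(-d / (2.0 * TAU_ * TAU_));
        acc  += w * x[c];                       /* weighted accumulation */
        wsum += w;                              /* weight to divide out later */
    }
    printf("NL-means estimate at %d: %f\n", ref, acc / wsum);
    return 0;
}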
LIBIRWLS-predict.c
/* ============================================================================ Author : Roberto Diaz Morales ============================================================================ Copyright (c) 2016 Roberto Díaz Morales Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ============================================================================ */ /** * @brief Implementation of the predictions functions to classify data using a trained model. * * See LIBIRWLS-predict.h for a detailed description of its functions and parameters. * @file LIBIRWLS-predict.c * @author Roberto Diaz Morales * @date 23 Aug 2016 * * @see LIBIRWLS-predict.h */ #include <omp.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include "kernels.h" #include "LIBIRWLS-predict.h" /** * @cond */ /** * @brief Function to obtain the soft output of the classifier. * * Function to obtain the soft output (the output of the classifier before using the threshold to decide +1 or -1) of the model on a dataset. * @param dataset The test set. * @param mymodel A trained SVM model. * @param props The test properties. * @return The output of the classifier for every test sample (soft output). */ double *softTest(svm_dataset dataset, model mymodel,predictProperties props){ int i,j; double *predictions=(double *) malloc((dataset.l)*sizeof(double)); #pragma omp parallel default(shared) private(i,j) { #pragma omp for schedule(static) for (i=0;i<dataset.l;i++){ // Iteration over all the training elements double pred=mymodel.bias; for (j=0;j<mymodel.nSVs;j++){ // Iteration over the Support Vectors pred+=(mymodel.weights[j])*kernelTest(dataset, i, mymodel, j); } predictions[i]=pred; } } // Obtaining accuracy (only for labeled test dataset) double aciertos=0.0; double total=(double)dataset.l; if(props.Labels==1){ for (i=0;i<dataset.l;i++){ if(predictions[i]>0 & dataset.y[i]>0) aciertos++; if(predictions[i]<=0 & dataset.y[i]<=0) aciertos++; } printf("Accuracy: %f\n",aciertos/total); } return predictions; } /** * @brief Function to classify data in a labeled dataset and to obtain the accuracy. * * Function to classify data in a labeled dataset and to obtain the accuracy. * @param dataset The test set. * @param mymodel A trained SVM model. * @param props The test properties. * @return The output of the classifier for every test sample. 
*/ double *test(svm_dataset dataset, model mymodel,predictProperties props){ int i,j; double *predictions=(double *) malloc((dataset.l)*sizeof(double)); #pragma omp parallel default(shared) private(i,j) { #pragma omp for schedule(static) for (i=0;i<dataset.l;i++){ // Iteration over all the training elements double pred=mymodel.bias; for (j=0;j<mymodel.nSVs;j++){ // Iteration over the Support Vectors pred+=(mymodel.weights[j])*kernelTest(dataset, i, mymodel, j); } predictions[i]=pred; if(predictions[i]>=0.0) predictions[i]=1.0; else predictions[i]=-1.0; } } // Obtaining accuracy (only for labeled test dataset) double aciertos=0.0; double total=(double)dataset.l; if(props.Labels==1){ for (i=0;i<dataset.l;i++){ if(predictions[i]>0 & dataset.y[i]>0) aciertos++; if(predictions[i]<=0 & dataset.y[i]<=0) aciertos++; } printf("Accuracy: %f\n",aciertos/total); } return predictions; } /** * @brief It shows the command line instructions in the standard output. * * It shows the command line instructions in the standard output. */ void printPredictInstructions(void) { fprintf(stderr, "LIBIRWLS-predict: This software predicts the label of a SVM given a data set of samples and a model obtained with full-train or budgeted-train"); fprintf(stderr, "and store the results in an output file.\n\n"); fprintf(stderr, "Usage: LIBIRWLS-predict [options] data_set_file model_file output_file\n\n"); fprintf(stderr, "Options:\n"); fprintf(stderr, " -t Number of Threads: (default 1)\n"); fprintf(stderr, " -l type of data set: (default 0)\n"); fprintf(stderr, " 0 -- Data set with no target as first dimension.\n"); fprintf(stderr, " 1 -- Data set with label as first dimension (obtains accuracy too)\n"); fprintf(stderr, " -s Soft output: (default 0)\n"); fprintf(stderr, " 0 -- Obtains the class of every data (It takes values of +1 or -1).\n"); fprintf(stderr, " 1 -- The output before the class decision (Useful to combine in ensembles with other algorithms).\n"); fprintf(stderr, " -f file format: (default 1)\n"); fprintf(stderr, " 0 -- CSV format (comma separator)\n"); fprintf(stderr, " 1 -- libsvm format\n"); fprintf(stderr, " -p separator: csv separator character (default \",\" if csv format is selected)\n"); fprintf(stderr, " -v verbose: (default 1)\n"); fprintf(stderr, " 0 -- No screen messages\n"); fprintf(stderr, " 1 -- Screen messages\n"); } /** * @brief It parses the prediction parameters from the command line. * * It parses the prediction parameters from the command line. * @param argc The number of words of the command line. * @param argv The list of words of the command line. * @return A struct that contains the values of the test parameters. 
*/
predictProperties parsePredictParameters(int* argc, char*** argv) {

    predictProperties props;
    props.Labels=0;
    props.Threads=1;
    props.Soft=0;
    props.file = 1;
    props.separator = ",";
    props.verbose = 1;

    int i;
    for (i = 1; i < *argc; ++i) {
        if ((*argv)[i][0] != '-') break;
        if (++i >= *argc) {
            printPredictInstructions();
            exit(1);
        }

        char* param_name = &(*argv)[i-1][1];
        char* param_value = (*argv)[i];

        if (strcmp(param_name, "t") == 0) {
            /* thread count is an integer; the original atof() relied on an
             * implicit double-to-int conversion */
            props.Threads = atoi(param_value);
        } else if (strcmp(param_name, "f") == 0) {
            props.file = atoi(param_value);
        } else if (strcmp(param_name, "p") == 0) {
            props.separator = param_value;
        } else if (strcmp(param_name, "v") == 0) {
            props.verbose = atoi(param_value);
        } else if (strcmp(param_name, "l") == 0) {
            props.Labels = atoi(param_value);
            if(props.Labels !=0 && props.Labels !=1){
                printf("\nInvalid type of test data set:%d\n",props.Labels);
                exit(2);
            }
        } else if (strcmp(param_name, "s") == 0) {
            props.Soft = atoi(param_value);
            if(props.Soft !=0 && props.Soft !=1){
                printf("\nInvalid type of output (-s param):%d\n",props.Soft);
                exit(2);
            }
        } else {
            fprintf(stderr, "Unknown parameter %s\n",param_name);
            printPredictInstructions();
            exit(2);
        }
    }

    int j;
    for (j = 1; i + j - 1 < *argc; ++j) {
        (*argv)[j] = (*argv)[i + j - 1];
    }
    *argc -= i - 1;

    return props;
}

/**
 * @endcond
 */
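Both test() and softTest() evaluate the same decision function, f(x) = bias + sum_j weights[j] * K(x, sv_j), differing only in whether the sign is taken at the end. The sketch below reproduces that sum with a stand-in Gaussian (RBF) kernel on dense vectors; kernelTest() and the model layout in the real library are more general:

/* Stand-alone sketch of the SVM decision function used above. The model
 * values (support vectors, weights, bias, gamma) are made up. */
#include <math.h>
#include <stdio.h>

#define DIM 2
#define NSV 3

static double rbf(const double *a, const double *b, double gamma) {
    double d2 = 0.0;
    for (int k = 0; k < DIM; k++) d2 += (a[k]-b[k]) * (a[k]-b[k]);
    return exp(-gamma * d2);
}

int main(void) {
    double sv[NSV][DIM] = {{0,0}, {1,1}, {2,0}};
    double weights[NSV] = {0.7, -1.2, 0.5};
    double bias = 0.1, gamma = 0.5;
    double x[DIM] = {0.9, 0.8};

    double pred = bias;
    for (int j = 0; j < NSV; j++)          /* iterate over support vectors */
        pred += weights[j] * rbf(x, sv[j], gamma);

    /* softTest() would return pred; test() thresholds it at zero */
    printf("soft output %f -> class %+d\n", pred, pred >= 0.0 ? 1 : -1);
    return 0;
}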
stat_ops_dm.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "stat_ops_dm.h" #include "utility.h" #include "constant.h" // calculate norm double dm_state_norm_squared(const CTYPE *state, ITYPE dim) { ITYPE index; double norm = 0; #ifdef _OPENMP #pragma omp parallel for reduction(+:norm) #endif for (index = 0; index < dim; ++index){ norm += creal(state[index*dim+index]); } return norm; } // calculate entropy of probability distribution of Z-basis measurements double dm_measurement_distribution_entropy(const CTYPE *state, ITYPE dim){ ITYPE index; double ent=0; const double eps = 1e-15; #ifdef _OPENMP #pragma omp parallel for reduction(+:ent) #endif for(index = 0; index < dim; ++index){ double prob = creal(state[index*dim + index]); if(prob > eps){ ent += -1.0*prob*log(prob); } } return ent; } // calculate probability with which we obtain 0 at target qubit double dm_M0_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){ const ITYPE loop_dim = dim/2; const ITYPE mask = 1ULL << target_qubit_index; ITYPE state_index; double sum =0.; #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) #endif for(state_index=0;state_index<loop_dim;++state_index){ ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index); sum += creal(state[basis_0*dim+basis_0]); } return sum; } // calculate probability with which we obtain 1 at target qubit double dm_M1_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){ const ITYPE loop_dim = dim/2; const ITYPE mask = 1ULL << target_qubit_index; ITYPE state_index; double sum =0.; #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) #endif for(state_index=0;state_index<loop_dim;++state_index){ ITYPE basis_1 = insert_zero_to_basis_index(state_index,mask,target_qubit_index) ^ mask; sum += creal(state[basis_1*dim + basis_1]); } return sum; } // calculate merginal probability with which we obtain the set of values measured_value_list at sorted_target_qubit_index_list // warning: sorted_target_qubit_index_list must be sorted. 
double dm_marginal_prob(const UINT* sorted_target_qubit_index_list, const UINT* measured_value_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim){ ITYPE loop_dim = dim >> target_qubit_index_count; ITYPE state_index; double sum=0.; #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) #endif for(state_index = 0;state_index < loop_dim; ++state_index){ ITYPE basis = state_index; for(UINT cursor=0; cursor < target_qubit_index_count ; cursor++){ UINT insert_index = sorted_target_qubit_index_list[cursor]; ITYPE mask = 1ULL << insert_index; basis = insert_zero_to_basis_index(basis, mask , insert_index ); basis ^= mask * measured_value_list[cursor]; } sum += creal(state[basis*dim+basis]); } return sum; } void dm_state_add(const CTYPE *state_added, CTYPE *state, ITYPE dim) { ITYPE index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < dim*dim; ++index) { state[index] += state_added[index]; } } void dm_state_multiply(CTYPE coef, CTYPE *state, ITYPE dim) { ITYPE index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < dim*dim; ++index) { state[index] *= coef; } } double dm_expectation_value_multi_qubit_Pauli_operator_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim) { CTYPE sum = 0; for (ITYPE state_index = 0; state_index < dim; ++state_index) { CTYPE coef = 1.0; ITYPE state_index_sub = state_index; for (UINT i = 0; i < target_qubit_index_count; ++i) { UINT pauli_type = Pauli_operator_type_list[i]; UINT target_qubit_index = target_qubit_index_list[i]; if (pauli_type == 1) { state_index_sub ^= ((1ULL) << target_qubit_index); } else if (pauli_type == 2) { coef *= 1.i; if (state_index_sub & ((1ULL) << target_qubit_index)) { coef *= -1.; } state_index_sub ^= ((1ULL) << target_qubit_index); } else if (pauli_type == 3) { if (state_index_sub & ((1ULL) << target_qubit_index)) { coef *= -1.; } } } sum += coef * state[state_index * dim + state_index_sub]; } return creal(sum); } void dm_state_tensor_product(const CTYPE* state_left, ITYPE dim_left, const CTYPE* state_right, ITYPE dim_right, CTYPE* state_dst) { ITYPE y_left, x_left, y_right, x_right; const ITYPE dim_new = dim_left * dim_right; for (y_left = 0; y_left < dim_left; ++y_left) { for (x_left = 0; x_left < dim_left; ++x_left) { CTYPE val_left = state_left[y_left * dim_left + x_left]; for (y_right = 0; y_right < dim_right; ++y_right) { for (x_right = 0; x_right < dim_right; ++x_right) { CTYPE val_right = state_right[y_right * dim_right + x_right]; ITYPE x_new = x_left * dim_right + x_right; ITYPE y_new = y_left * dim_right + y_right; state_dst[y_new * dim_new + x_new] = val_right * val_left; } } } } } void dm_state_permutate_qubit(const UINT* qubit_order, const CTYPE* state_src, CTYPE* state_dst, UINT qubit_count, ITYPE dim) { ITYPE y, x; for (y = 0; y < dim; ++y) { for (x = 0; x < dim; ++x) { ITYPE src_x = 0, src_y = 0; for (UINT qubit_index = 0; qubit_index < qubit_count; ++qubit_index) { if ((x >> qubit_index) % 2) { src_x += 1ULL << qubit_order[qubit_index]; } if ((y >> qubit_index) % 2) { src_y += 1ULL << qubit_order[qubit_index]; } } state_dst[y * dim + x] = state_src[src_y * dim + src_x]; } } } void dm_state_partial_trace_from_density_matrix(const UINT* target, UINT target_count, const CTYPE* state_src, CTYPE* state_dst, ITYPE dim) { ITYPE dst_dim = dim >> target_count; ITYPE trace_dim = 1ULL << target_count; UINT* sorted_target = create_sorted_ui_list(target, target_count); ITYPE* 
mask_list = create_matrix_mask_list(target, target_count); ITYPE y,x; for (y = 0; y < dst_dim; ++y) { for (x = 0; x < dst_dim; ++x) { ITYPE base_x = x; ITYPE base_y = y; for (UINT target_index = 0; target_index < target_count; ++target_index) { UINT insert_index = sorted_target[target_index]; base_x = insert_zero_to_basis_index(base_x, 1ULL << insert_index, insert_index); base_y = insert_zero_to_basis_index(base_y, 1ULL << insert_index, insert_index); } CTYPE val = 0.; for (ITYPE idx = 0; idx < trace_dim; ++idx) { ITYPE src_x = base_x ^ mask_list[idx]; ITYPE src_y = base_y ^ mask_list[idx]; val += state_src[src_y * dim + src_x]; } state_dst[y*dst_dim + x] = val; } } free(sorted_target); free(mask_list); } void dm_state_partial_trace_from_state_vector(const UINT* target, UINT target_count, const CTYPE* state_src, CTYPE* state_dst, ITYPE dim) { ITYPE dst_dim = dim >> target_count; ITYPE trace_dim = 1ULL << target_count; UINT* sorted_target = create_sorted_ui_list(target, target_count); ITYPE* mask_list = create_matrix_mask_list(target, target_count); ITYPE y, x; for (y = 0; y < dst_dim; ++y) { for (x = 0; x < dst_dim; ++x) { ITYPE base_x = x; ITYPE base_y = y; for (UINT target_index = 0; target_index < target_count; ++target_index) { UINT insert_index = sorted_target[target_index]; base_x = insert_zero_to_basis_index(base_x, 1ULL << insert_index, insert_index); base_y = insert_zero_to_basis_index(base_y, 1ULL << insert_index, insert_index); } CTYPE val = 0.; for (ITYPE idx = 0; idx < trace_dim; ++idx) { ITYPE src_x = base_x ^ mask_list[idx]; ITYPE src_y = base_y ^ mask_list[idx]; val += state_src[src_y] * conj(state_src[src_x]); } state_dst[y*dst_dim + x] = val; } } free(sorted_target); free(mask_list); }
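Most of the measurement routines above reduce to sums over the diagonal of the density matrix: dm_state_norm_squared() returns Tr(rho), and dm_M0_prob() sums the diagonal entries whose basis index has a 0 at the target qubit. A minimal single-qubit check of both quantities (plain double complex stands in for the library's CTYPE, and int for ITYPE):

/* Diagonal-based quantities on a 1-qubit density matrix, mirroring
 * dm_state_norm_squared() and dm_M0_prob() above. */
#include <complex.h>
#include <stdio.h>

int main(void) {
    /* rho for the mixed state 0.25|0><0| + 0.75|1><1|  (dim = 2) */
    double complex rho[4] = {0.25, 0.0,
                             0.0,  0.75};
    int dim = 2;

    double trace = 0.0;
    for (int i = 0; i < dim; i++)
        trace += creal(rho[i*dim + i]);   /* what dm_state_norm_squared sums */

    double p0 = creal(rho[0*dim + 0]);    /* dm_M0_prob for target qubit 0 */

    printf("Tr(rho) = %f, P(measure 0) = %f\n", trace, p0);
    return 0;
}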
mupat.c
/** * @file mupat.c * @brief functions in MuPAT * @author Hotaka Yagi * @date last update 2020 Jan 15 */ #include "mupat.h" //----------------------------------------------------------------- // Double - Double-Double operation //----------------------------------------------------------------- /** * * @fn _d_a_dd(double*,double*,double*,double*,double*,int,int,int,int) * @brief You can compute the addition of double and do uble-double numbers in a vector or matrix. You can choose the number of OpenMP threads and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _d_a_dd(double *chi, double *clo, double *ahi, double *bhi, double *blo, int m, int n, int omp_threadNum, int avx) { int i; int p = m % 4; if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); switch(avx){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,sbhi,sblo,schi,sclo; #pragma omp for for (i = 0; i < m * n; i ++){ //load sahi = ahi[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_add_dd(&schi,&sclo,sahi,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vchi,vclo; #pragma omp for for (i = 0; i < m * n - p; i += 4){ //load vahi = _mm256_loadu_pd(&ahi[i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _d_add_dd_avx2(&vchi,&vclo,vahi,vbhi,vblo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sahi = ahi[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_add_dd(&schi,&sclo,sahi,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; } } /** * * @fn _d_scl_dd(double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the scaler of double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _d_scl_dd(double *chi, double *clo, double *ahi, double *bhi, double *blo,int m, int n, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,sbhi,sblo,schi,sclo; sahi = ahi[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahi,valo,vbhi,vblo,vchi,vclo,vch,vcl; double vzhi[4]={0,0,0,0}; double vzlo[4]={0,0,0,0}; vahi = _mm256_set_pd(ahi[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhi = _mm256_set_pd(bhi[i],0,0,0); vblo = _mm256_set_pd(blo[i],0,0,0); //calc _d_mul_dd_fma(&vchi,&vclo,vahi,vbhi,vblo); _mm256_storeu_pd( &vzhi[0], vchi ); _mm256_storeu_pd( &vzlo[0], vclo ); chi[i]=vzhi[3]; clo[i]=vzlo[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vchi,vclo,vch,vcl; sahi = ahi[0]; vahi = _mm256_broadcast_sd(&ahi[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _d_mul_dd_avx2(&vchi,&vclo,vahi,vbhi,vblo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahi,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vchi,vclo,vch,vcl; sahi = ahi[0]; vahi = _mm256_broadcast_sd(&ahi[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _d_mul_dd_fma(&vchi,&vclo,vahi,vbhi,vblo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; } } } /** * * @fn _d_dot_dd(double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the inner product of double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _d_dot_dd(double *chi, double *clo, double *ahi, double *bhi, double *blo,int m, int n, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahi,sbhi,sblo,schi,sclo,szhi,szlo; double ch [num_threads]; double cl [num_threads]; for (i = 0; i < num_threads; i ++){ ch[i]=0; cl[i]=0; } #pragma omp for for (i = 0; i < m * n; i ++){ //load sahi = ahi[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&szhi,&szlo,ch[tid],cl[tid],schi,sclo); //store ch[tid] = szhi; cl[tid] = szlo; } #pragma omp critical { //calc _dd_add_dd(&schi,&sclo,ch[tid],cl[tid],chi[0],clo[0]); //store chi[0] = schi; clo[0] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double schi,sclo; double ch [num_threads*4]; double cl [num_threads*4]; for (i = 0; i < num_threads * 4; i ++){ ch[i]=0; cl[i]=0; } __m256d vahi,vbhi,vblo,vzhi,vzlo; __m256d vchi = _mm256_setzero_pd(); __m256d vclo = _mm256_setzero_pd(); #pragma omp for for (i = 0; i < m * n; i ++){ //load vahi = _mm256_set_pd(ahi[i],0,0,0); vbhi = _mm256_set_pd(bhi[i],0,0,0); vblo = _mm256_set_pd(blo[i],0,0,0); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); } //store _mm256_storeu_pd( &ch[4*tid], vchi ); _mm256_storeu_pd( &cl[4*tid], vclo ); #pragma omp critical { //calc _dd_add_dd(&schi,&sclo,ch[4*tid+3],cl[4*tid+3],chi[0],clo[0]); //store chi[0] = schi; clo[0] = sclo; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahi,sbhi,sblo,schi,sclo,szhi,szlo; double ch [num_threads*4]; double cl [num_threads*4]; for (i = 0; i < num_threads * 4; i ++){ ch[i]=0; cl[i]=0; } __m256d vahi,vbhi,vblo,vzhi,vzlo; __m256d vchi = _mm256_setzero_pd(); __m256d vclo = _mm256_setzero_pd(); #pragma omp for for (i = 0; i < m * n - p; i += 4){ //load vahi = _mm256_loadu_pd(&ahi[i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _d_mul_dd_avx2(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); } //store _mm256_storeu_pd( &ch[4*tid], vchi ); _mm256_storeu_pd( &cl[4*tid], vclo ); #pragma omp for for (i = m - p; i < m * n; i++) { //load sahi = ahi[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&szhi,&szlo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,szhi,szlo,ch[4*tid],cl[4*tid]); //store ch[4*tid] = schi; cl[4*tid] = sclo; } #pragma omp critical for(i=1;i<4;i++){ //calc _dd_add_dd(&schi,&sclo,ch[4*tid+i],cl[4*tid+i],ch[4*tid],cl[4*tid]); //store ch[4*tid] = schi; cl[4*tid] = sclo; } #pragma omp critical { //calc _dd_add_dd(&schi,&sclo,ch[4*tid],cl[4*tid],chi[0],clo[0]); //store chi[0] = schi; clo[0] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahi,sbhi,sblo,schi,sclo,szhi,szlo; double ch [num_threads*4]; double cl 
[num_threads*4]; for (i = 0; i < num_threads * 4; i ++){ ch[i]=0; cl[i]=0; } __m256d vahi,vbhi,vblo,vzhi,vzlo; __m256d vchi = _mm256_setzero_pd(); __m256d vclo = _mm256_setzero_pd(); #pragma omp for for (i = 0; i < m * n - p; i += 4){ //load vahi = _mm256_loadu_pd(&ahi[i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); } //store _mm256_storeu_pd( &ch[4*tid], vchi ); _mm256_storeu_pd( &cl[4*tid], vclo ); #pragma omp for for (i = m - p; i < m * n; i++) { //load sahi = ahi[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&szhi,&szlo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,szhi,szlo,ch[4*tid],cl[4*tid]); //store ch[4*tid] = schi; cl[4*tid] = sclo; } #pragma omp critical for(i=1;i<4;i++){ //calc _dd_add_dd(&schi,&sclo,ch[4*tid+i],cl[4*tid+i],ch[4*tid],cl[4*tid]); //store ch[4*tid] = schi; cl[4*tid] = sclo; } #pragma omp critical { //calc _dd_add_dd(&schi,&sclo,ch[4*tid],cl[4*tid],chi[0],clo[0]); //store chi[0] = schi; clo[0] = sclo; } } break; } } } /** * * @fn _d_mv_dd(double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-vector multiplication of double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _d_mv_dd(double *chi, double *clo, double *ahi,double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,sbhi,sblo,schi,sclo; #pragma omp for schedule(static) for (k = 0; k < l; k ++) { //load sbhi = bhi[k]; sblo = blo[k]; for (i = 0; i < m; i ++) { //load sahi = ahi[m*k+i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i = 0; i < m; i ++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double zhi[4] = {0,0,0,0}; double zlo[4] = {0,0,0,0}; double ch [num_threads * m]; double cl [num_threads * m]; for (k = 0; k < num_threads * m; k ++) { ch[k] = 0; cl[k] = 0; } double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { //load vbhi = _mm256_set_pd(bhi[k],0,0,0); vblo = _mm256_set_pd(blo[k],0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); vchi = _mm256_set_pd(ch[m*tid+i],0,0,0); vclo = _mm256_set_pd(cl[m*tid+i],0,0,0); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); ch[m*tid+i]=zhi[3]; cl[m*tid+i]=zlo[3]; } } #pragma omp critical for(i = 0; i < m; i ++){ 
_dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads * m; k++) { ch[k] = 0; cl[k] = 0; } double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc _d_mul_dd_avx2(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; sblo = blo[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; sblo = blo[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } } break; } } /** * * @fn _d_tmv_dd(double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplication of double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _d_tmv_dd( double *chi, double *clo, double *ahi,double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; double sahi,sbhi,sblo,schi,sclo,ch,cl; #pragma omp for schedule(static) for (k = 0; k < l; k++) { ch=0; cl=0; for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&ch,&cl,schi,sclo,ch,cl); } chi[k]=ch; clo[k]=cl; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; __m256d vahi,vbhi,vblo,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vchi = _mm256_set_pd(0,0,0,0); vclo = _mm256_set_pd(0,0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); vbhi = _mm256_set_pd(bhi[i],0,0,0); vblo = _mm256_set_pd(blo[i],0,0,0); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); } _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[k]=zhi[3]; clo[k]=zlo[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _d_mul_dd_avx2(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; } break; } } /** * * @fn 
_d_mm_dd(double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplication of double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _d_mm_dd(double *chi, double *clo, double *ahi,double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,sbhi,sblo,schi,sclo; int i,j,k; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahi,vbhi,vblo,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; int j,k,i; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_set_pd(bhi[l*j+k],0,0,0); vblo = _mm256_set_pd(blo[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); vchi = _mm256_set_pd(chi[m*j+i],0,0,0); vclo = _mm256_set_pd(clo[m*j+i],0,0,0); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[m*j+i]=zhi[3]; clo[m*j+i]=zlo[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); vblo = _mm256_broadcast_sd(&blo[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _d_mul_dd_avx2(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double sahi,sbhi,sblo,schi,sclo; __m256d vahi,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); vblo = _mm256_broadcast_sd(&blo[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _d_mul_dd_fma(&vzhi,&vzlo,vahi,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; sbhi = bhi[l*j+k]; sblo = 
blo[l*j+k]; //calc _d_mul_dd(&schi,&sclo,sahi,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; } break; } } //----------------------------------------------------------------- // Double - Quad-Double operation //----------------------------------------------------------------- /** * * @fn _d_a_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int) * @brief You can compute the addition of double and quad-double numbers in a vector or matrix. You can choose the number of OpenMP threads and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh input (double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _d_a_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx) { int i; int p = m % 4; if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); switch(avx){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; #pragma omp for for (i = 0; i < m * n; i ++){ //load sahh = ahh[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_add_qd(&schh,&schl,&sclh,&scll,sahh,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; #pragma omp for for (i = 0; i < m * n - p; i += 4){ //load vahh = _mm256_loadu_pd(&ahh[i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _d_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sahh = ahh[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_add_qd(&schh,&schl,&sclh,&scll,sahh,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; } } /** * * @fn _d_scl_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the scaler of double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
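* A call sketch (hypothetical names; only ahh[0] is read, so the double scalar can be passed as the address of a single variable):
* @code
*   double alpha = 2.0;
*   _d_scl_qd(chh, chl, clh, cll, &alpha, bhh, bhl, blh, bll, m, n, 0, 1, 1);
* @endcode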
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh input (double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _d_scl_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; sahh = ahh[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_mul_qd(&schh,&schl,&sclh,&scll,sahh,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; double vzhh[4]={0,0,0,0}; double vzhl[4]={0,0,0,0}; double vzlh[4]={0,0,0,0}; double vzll[4]={0,0,0,0}; vahh = _mm256_set_pd(ahh[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &vzhh[0], vchh ); _mm256_storeu_pd( &vzhl[0], vchl ); _mm256_storeu_pd( &vzlh[0], vclh ); _mm256_storeu_pd( &vzll[0], vcll ); chh[i] = vzhh[3]; chl[i] = vzhl[3]; clh[i] = vzlh[3]; cll[i] = vzll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; sahh = ahh[0]; vahh = _mm256_broadcast_sd(&ahh[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _d_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_mul_qd(&schh,&schl,&sclh,&scll,sahh,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; sahh = ahh[0]; vahh = _mm256_broadcast_sd(&ahh[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_mul_qd(&schh,&schl,&sclh,&scll,sahh,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] 
= scll; } } break; } } } /** * * @fn _d_dot_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the inner product of double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh input (double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _d_dot_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads]; double tchl [num_threads]; double tclh [num_threads]; double tcll [num_threads]; for (i = 0; i < num_threads; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } #pragma omp for for (i = 0; i < m * n; i ++){ //load sahh = ahh[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_mul_qd(&schh,&schl,&sclh,&scll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],schh,schl,sclh,scll); //store tchh[tid] = szhh; tchl[tid] = szhl; tclh[tid] = szlh; tcll[tid] = szll; } #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads*4]; double tchl [num_threads*4]; double tclh [num_threads*4]; double tcll [num_threads*4]; for (i = 0; i < num_threads*4; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } __m256d vahh,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll; __m256d vchh = _mm256_setzero_pd(); __m256d vchl = _mm256_setzero_pd(); __m256d vclh = _mm256_setzero_pd(); __m256d vcll = _mm256_setzero_pd(); #pragma omp for for (i = 0; i < m * n; i ++){ //load vahh = _mm256_set_pd(ahh[i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } //store _mm256_storeu_pd( &tchh[4*tid], vchh ); _mm256_storeu_pd( &tchl[4*tid], vchl ); _mm256_storeu_pd( &tclh[4*tid], vclh ); _mm256_storeu_pd( &tcll[4*tid], vcll ); #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+3],tchl[4*tid+3],tclh[4*tid+3],tcll[4*tid+3],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads*4]; double tchl [num_threads*4]; double tclh [num_threads*4]; double tcll [num_threads*4]; for (i = 
0; i < num_threads*4; i++) {
	tchh[i] = 0;
	tchl[i] = 0;
	tclh[i] = 0;
	tcll[i] = 0;
}
__m256d vahh, vbhh, vbhl, vblh, vbll, vzhh, vzhl, vzlh, vzll;
__m256d vchh = _mm256_setzero_pd();
__m256d vchl = _mm256_setzero_pd();
__m256d vclh = _mm256_setzero_pd();
__m256d vcll = _mm256_setzero_pd();
#pragma omp for
for (i = 0; i < m * n - p; i += 4) {
	//load
	vahh = _mm256_loadu_pd(&ahh[i]);
	vbhh = _mm256_loadu_pd(&bhh[i]);
	vbhl = _mm256_loadu_pd(&bhl[i]);
	vblh = _mm256_loadu_pd(&blh[i]);
	vbll = _mm256_loadu_pd(&bll[i]);
	//calc
	_d_mul_qd_avx2(&vzhh, &vzhl, &vzlh, &vzll, vahh, vbhh, vbhl, vblh, vbll);
	_qd_add_qd_avx2(&vchh, &vchl, &vclh, &vcll, vzhh, vzhl, vzlh, vzll, vchh, vchl, vclh, vcll);
}
//store
_mm256_storeu_pd(&tchh[4*tid], vchh);
_mm256_storeu_pd(&tchl[4*tid], vchl);
_mm256_storeu_pd(&tclh[4*tid], vclh);
_mm256_storeu_pd(&tcll[4*tid], vcll);
#pragma omp for
for (i = m * n - p; i < m * n; i++) {
	//load
	sahh = ahh[i];
	sbhh = bhh[i];
	sbhl = bhl[i];
	sblh = blh[i];
	sbll = bll[i];
	//calc
	_d_mul_qd(&schh, &schl, &sclh, &scll, sahh, sbhh, sbhl, sblh, sbll);
	_qd_add_qd(&szhh, &szhl, &szlh, &szll, tchh[4*tid], tchl[4*tid], tclh[4*tid], tcll[4*tid], schh, schl, sclh, scll);
	//store
	tchh[4*tid] = szhh;
	tchl[4*tid] = szhl;
	tclh[4*tid] = szlh;
	tcll[4*tid] = szll;
}
#pragma omp critical
for (i = 1; i < 4; i++) {
	//calc
	_qd_add_qd(&schh, &schl, &sclh, &scll, tchh[4*tid+i], tchl[4*tid+i], tclh[4*tid+i], tcll[4*tid+i], tchh[4*tid], tchl[4*tid], tclh[4*tid], tcll[4*tid]);
	//store
	tchh[4*tid] = schh;
	tchl[4*tid] = schl;
	tclh[4*tid] = sclh;
	tcll[4*tid] = scll;
}
#pragma omp critical
{
	//calc
	_qd_add_qd(&schh, &schl, &sclh, &scll, tchh[4*tid], tchl[4*tid], tclh[4*tid], tcll[4*tid], chh[0], chl[0], clh[0], cll[0]);
	//store
	chh[0] = schh;
	chl[0] = schl;
	clh[0] = sclh;
	cll[0] = scll;
}
}
break;
case 1:
#pragma omp parallel num_threads(omp_threadNum)
{
	int i;	/* thread-private loop counter */
	int num_threads = omp_get_num_threads();
	int tid = omp_get_thread_num();
	double sahh, sbhh, sbhl, sblh, sbll, szhh, szhl, szlh, szll, schh, schl, sclh, scll;
	double tchh [num_threads*4];
	double tchl [num_threads*4];
	double tclh [num_threads*4];
	double tcll [num_threads*4];
	for (i = 0; i < num_threads*4; i++) {
		tchh[i] = 0;
		tchl[i] = 0;
		tclh[i] = 0;
		tcll[i] = 0;
	}
	__m256d vahh, vbhh, vbhl, vblh, vbll, vzhh, vzhl, vzlh, vzll;
	__m256d vchh = _mm256_setzero_pd();
	__m256d vchl = _mm256_setzero_pd();
	__m256d vclh = _mm256_setzero_pd();
	__m256d vcll = _mm256_setzero_pd();
#pragma omp for
	for (i = 0; i < m * n - p; i += 4) {
		//load
		vahh = _mm256_loadu_pd(&ahh[i]);
		vbhh = _mm256_loadu_pd(&bhh[i]);
		vbhl = _mm256_loadu_pd(&bhl[i]);
		vblh = _mm256_loadu_pd(&blh[i]);
		vbll = _mm256_loadu_pd(&bll[i]);
		//calc
		_d_mul_qd_fma(&vzhh, &vzhl, &vzlh, &vzll, vahh, vbhh, vbhl, vblh, vbll);
		_qd_add_qd_avx2(&vchh, &vchl, &vclh, &vcll, vzhh, vzhl, vzlh, vzll, vchh, vchl, vclh, vcll);
	}
	//store
	_mm256_storeu_pd(&tchh[4*tid], vchh);
	_mm256_storeu_pd(&tchl[4*tid], vchl);
	_mm256_storeu_pd(&tclh[4*tid], vclh);
	_mm256_storeu_pd(&tcll[4*tid], vcll);
#pragma omp for
	for (i = m * n - p; i < m * n; i++) {
		//load
		sahh = ahh[i];
		sbhh = bhh[i];
		sbhl = bhl[i];
		sblh = blh[i];
		sbll = bll[i];
		//calc
		_d_mul_qd(&schh, &schl, &sclh, &scll, sahh, sbhh, sbhl, sblh, sbll);
		_qd_add_qd(&szhh, &szhl, &szlh, &szll, tchh[4*tid], tchl[4*tid], tclh[4*tid], tcll[4*tid], schh, schl, sclh, scll);
		//store
		tchh[4*tid] = szhh;
		tchl[4*tid] = szhl;
		tclh[4*tid] = szlh;
		tcll[4*tid] = szll;
	}
#pragma omp critical
	for (i = 1; i < 4; i++) {
		//calc
		_qd_add_qd(&schh, &schl, &sclh, &scll, tchh[4*tid+i], tchl[4*tid+i], tclh[4*tid+i], tcll[4*tid+i], tchh[4*tid], tchl[4*tid], tclh[4*tid], tcll[4*tid]);
		//store
		tchh[4*tid] = schh;
		tchl[4*tid] = schl;
		tclh[4*tid] = sclh;
		tcll[4*tid] = scll;
	}
#pragma omp
critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; } } } /** * * @fn _d_mv_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-vector multiplicaion of double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh input (double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _d_mv_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double vte1[4]={0,0,0,0}; double vte2[4]={0,0,0,0}; double vte3[4]={0,0,0,0}; double vte4[4]={0,0,0,0}; double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double schh,schl,sclh,scll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[k],0,0,0); vbhl = _mm256_set_pd(bhl[k],0,0,0); vblh = _mm256_set_pd(blh[k],0,0,0); vbll = _mm256_set_pd(bll[k],0,0,0); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vzhh = _mm256_set_pd(tchh[m*tid+i],0,0,0); vzhl = _mm256_set_pd(tchl[m*tid+i],0,0,0); vzlh = _mm256_set_pd(tclh[m*tid+i],0,0,0); vzll = _mm256_set_pd(tcll[m*tid+i],0,0,0); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &vte1[0], vchh ); _mm256_storeu_pd( &vte2[0], vchl ); _mm256_storeu_pd( &vte3[0], vclh ); _mm256_storeu_pd( &vte4[0], vcll ); tchh[m*tid+i]=vte1[3]; tchl[m*tid+i]=vte2[3]; tclh[m*tid+i]=vte3[3]; tcll[m*tid+i]=vte4[3]; } } #pragma omp critical for(i=0;i<m;i++){ //calc 
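/* Merge: fold this thread's private quad-double partial result for row i
   into the shared output chh..cll; the critical section serializes the
   read-modify-write. */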
_qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); vblh = _mm256_broadcast_sd(&blh[k]); vbll = _mm256_broadcast_sd(&bll[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _d_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); vblh = _mm256_broadcast_sd(&blh[k]); vbll = _mm256_broadcast_sd(&bll[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; //calc 
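/* Scalar cleanup for the m % 4 trailing rows that the 4-wide AVX2 loop
   above cannot cover. */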
_d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; } } /** * * @fn _d_tmv_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplicaion of double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh input (double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _d_tmv_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double schh,schl,sclh,scll,sahh,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { schh=0; schl=0; sclh=0; scll=0; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,schh,schl,sclh,scll); //store schh = szhh; schl = szhl; sclh = szlh; scll = szll; } chh[k] = schh; chl[k] = schl; clh[k] = sclh; cll[k] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); chh[k] = tchh[3]; chl[k] = tchl[3]; clh[k] = tclh[3]; cll[k] = tcll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vbhh = 
_mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _d_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; } break; } } /** * * @fn _d_mm_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplicaion of double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
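* A call sketch (hypothetical sizes; column-major storage with C of size m x n, A of size m x l, B of size l x n; C must be zero-initialized because the kernel accumulates into it):
* @code
*   _d_mm_qd(chh, chl, clh, cll, ahh, bhh, bhl, blh, bll, m, n, l, 0, 1, 1);
* @endcode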
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh input (double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _d_mm_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; double szhh[4]={0,0,0,0}; double szhl[4]={0,0,0,0}; double szlh[4]={0,0,0,0}; double szll[4]={0,0,0,0}; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[l*j+k],0,0,0); vbhl = _mm256_set_pd(bhl[l*j+k],0,0,0); vblh = _mm256_set_pd(blh[l*j+k],0,0,0); vbll = _mm256_set_pd(bll[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vchh = _mm256_set_pd(chh[m*j+i],0,0,0); vchl = _mm256_set_pd(chl[m*j+i],0,0,0); vclh = _mm256_set_pd(clh[m*j+i],0,0,0); vcll = _mm256_set_pd(cll[m*j+i],0,0,0); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); _mm256_storeu_pd( &szhh[0], vchh ); _mm256_storeu_pd( &szhl[0], vchl ); _mm256_storeu_pd( &szlh[0], vclh ); _mm256_storeu_pd( &szll[0], vcll ); chh[m*j+i] = szhh[3]; chl[m*j+i] = szhl[3]; clh[m*j+i] = szlh[3]; cll[m*j+i] = szll[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); vblh = _mm256_broadcast_sd(&blh[l*j+k]); vbll = _mm256_broadcast_sd(&bll[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _d_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); 
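/* Accumulate: the double x quad-double product just formed in szhh..szll
   is folded into the C(i,j) entry held in chh..cll. */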
_qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); vblh = _mm256_broadcast_sd(&blh[l*j+k]); vbll = _mm256_broadcast_sd(&bll[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; } break; } } //----------------------------------------------------------------- // Double-Double - Double operation //----------------------------------------------------------------- /** * * @fn _dd_scl_d(double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the scaler of double-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_scl_d(double *chi, double *clo, double *ahi, double *alo, double *bhi, int m, int n, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,schi,sclo; sahi = ahi[0]; salo = alo[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhi = bhi[i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahi,valo,vbhi,vchi,vclo,vch,vcl; double vzhi[4]={0,0,0,0}; double vzlo[4]={0,0,0,0}; vahi = _mm256_set_pd(ahi[0],0,0,0); valo = _mm256_set_pd(alo[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhi = _mm256_set_pd(bhi[i],0,0,0); //calc _d_mul_dd_fma(&vchi,&vclo,vbhi,vahi,valo); _mm256_storeu_pd( &vzhi[0], vchi ); _mm256_storeu_pd( &vzlo[0], vclo ); chi[i]=vzhi[3]; clo[i]=vzlo[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,schi,sclo; __m256d vahi,valo,vbhi,vchi,vclo,vch,vcl; sahi = ahi[0]; salo = alo[0]; vahi = _mm256_broadcast_sd(&ahi[0]); valo = _mm256_broadcast_sd(&alo[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhi = _mm256_loadu_pd(&bhi[i]); //calc _d_mul_dd_avx2(&vchi,&vclo,vbhi,vahi,valo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhi = bhi[i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,schi,sclo; __m256d vahi,valo,vbhi,vchi,vclo,vch,vcl; sahi = ahi[0]; salo = alo[0]; vahi = _mm256_broadcast_sd(&ahi[0]); valo = _mm256_broadcast_sd(&alo[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhi = _mm256_loadu_pd(&bhi[i]); //calc _d_mul_dd_fma(&vchi,&vclo,vbhi,vahi,valo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhi = bhi[i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); //store chi[i] = schi; clo[i] = sclo; } } break; } } } /** * * @fn _dd_mv_d(double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-vector multiplication of double-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
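* A call sketch (hypothetical sizes; A is an m x l double-double matrix stored column-major in ahi/alo and b a length-l double vector; chi/clo must be zero-initialized because the kernel accumulates into them):
* @code
*   _dd_mv_d(chi, clo, ahi, alo, bhi, m, n, l, 0, 1, 1);
* @endcode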
* @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_mv_d(double *chi, double *clo, double *ahi, double *alo,double *bhi, int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,salo,sbhi,schi,sclo; #pragma omp for schedule(static) for (k = 0; k < l; k ++) { //load sbhi = bhi[k]; for (i = 0; i < m; i ++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i = 0; i < m; i ++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double zhi[4] = {0,0,0,0}; double zlo[4] = {0,0,0,0}; double ch [num_threads * m]; double cl [num_threads * m]; for (k = 0; k < num_threads * m; k ++) { ch[k] = 0; cl[k] = 0; } double sahi,salo,sbhi,schi,sclo; __m256d vahi,valo,vbhi,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { //load vbhi = _mm256_set_pd(bhi[k],0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vchi = _mm256_set_pd(ch[m*tid+i],0,0,0); vclo = _mm256_set_pd(cl[m*tid+i],0,0,0); //calc _d_mul_dd_fma(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); ch[m*tid+i]=zhi[3]; cl[m*tid+i]=zlo[3]; } } #pragma omp critical for(i = 0; i < m; i ++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads * m; k++) { ch[k] = 0; cl[k] = 0; } double sahi,salo,sbhi,schi,sclo; __m256d vahi,valo,vbhi,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc _d_mul_dd_avx2(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; 
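/* The FMA variant below is identical to the AVX2 case above except that the
   4-wide double x double-double product is formed with _d_mul_dd_fma instead
   of _d_mul_dd_avx2. */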
case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,salo,sbhi,schi,sclo; __m256d vahi,vbhi,valo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc _d_mul_dd_fma(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } } break; } } /** * * @fn _dd_tmv_d(double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplication of double-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_tmv_d( double *chi, double *clo, double *ahi,double *alo,double *bhi,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; double sahi,salo,sbhi,schi,sclo,ch,cl; #pragma omp for schedule(static) for (k = 0; k < l; k++) { ch=0; cl=0; for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&ch,&cl,schi,sclo,ch,cl); } chi[k]=ch; clo[k]=cl; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; __m256d vahi,valo,vbhi,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vchi = _mm256_set_pd(0,0,0,0); vclo = _mm256_set_pd(0,0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vbhi = _mm256_set_pd(bhi[i],0,0,0); //calc _d_mul_dd_fma(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); } _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[k]=zhi[3]; clo[k]=zlo[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,salo,sbhi,schi,sclo; __m256d vahi,valo,vbhi,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); //calc 
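/* Four double-double elements of the k-th column of A times four double
   elements of b; the products are then folded into the 4-lane double-double
   accumulator vch/vcl. */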
_d_mul_dd_avx2(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,sbhi,salo,schi,sclo; __m256d vahi,vbhi,valo,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); //calc _d_mul_dd_fma(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; } break; } } /** * * @fn _dd_mm_d(double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplication of double-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_mm_d(double *chi, double *clo, double *ahi, double *alo,double *bhi,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,sbhi,salo,schi,sclo; int i,j,k; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahi,vbhi,valo,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; int j,k,i; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_set_pd(bhi[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vchi = _mm256_set_pd(chi[m*j+i],0,0,0); vclo = _mm256_set_pd(clo[m*j+i],0,0,0); //calc _d_mul_dd_fma(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[m*j+i]=zhi[3]; clo[m*j+i]=zlo[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double 
sahi,sbhi,salo,schi,sclo; __m256d vahi,vbhi,valo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _d_mul_dd_avx2(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double sahi,sbhi,salo,schi,sclo; __m256d vahi,vbhi,valo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _d_mul_dd_fma(&vzhi,&vzlo,vbhi,vahi,valo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; //calc _d_mul_dd(&schi,&sclo,sbhi,sahi,salo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; } break; } } //----------------------------------------------------------------- // Double-Double - Double-Double operation //----------------------------------------------------------------- /** * * @fn _dd_a_dd(double*,double*,double*,double*,double*,double*,int,int,int,int) * @brief You can compute the addition of double-double and double-double numbers in a vector or matrix. You can choose the number of OpenMP threads and AVX2 on / off. 
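* A call sketch (hypothetical names; elementwise c = a + b over all m * n entries of a vector or matrix):
* @code
*   _dd_a_dd(chi, clo, ahi, alo, bhi, blo, m, n, 0, 1);
* @endcode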
* @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _dd_a_dd(double *chi, double *clo, double *ahi, double *alo, double *bhi, double *blo, int m, int n, int omp_threadNum, int avx) { int i; int p = m % 4; if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); switch(avx){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; #pragma omp for for (i = 0; i < m * n; i ++){ //load sahi = ahi[i]; salo = alo[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_add_dd(&schi,&sclo,sahi,salo,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vchi,vclo; #pragma omp for for (i = 0; i < m * n - p; i += 4){ //load vahi = _mm256_loadu_pd(&ahi[i]); valo = _mm256_loadu_pd(&alo[i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_add_dd_avx2(&vchi,&vclo,vahi,valo,vbhi,vblo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sahi = ahi[i]; salo = alo[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_add_dd(&schi,&sclo,sahi,salo,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; } } /** * * @fn _dd_a_dd_ieee(double*,double*,double*,double*,double*,double*,int,int,int,int) * @brief You can compute the addition of double-double and double-double numbers in a vector or matrix. You can choose the number of OpenMP threads and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _dd_a_dd_ieee(double *chi, double *clo, double *ahi, double *alo, double *bhi, double *blo, int m, int n, int omp_threadNum, int avx) { int i; int p = m % 4; if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); switch(avx){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; #pragma omp for for (i = 0; i < m * n; i ++){ //load sahi = ahi[i]; salo = alo[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_add_dd_ieee(&schi,&sclo,sahi,salo,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vchi,vclo; #pragma omp for for (i = 0; i < m * n - p; i += 4){ //load vahi = _mm256_loadu_pd(&ahi[i]); valo = _mm256_loadu_pd(&alo[i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_add_dd_avx2_ieee(&vchi,&vclo,vahi,valo,vbhi,vblo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sahi = ahi[i]; salo = alo[i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_add_dd_ieee(&schi,&sclo,sahi,salo,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; } } /** * * @fn _dd_scl_dd(double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the scaler of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_scl_dd(double *chi, double *clo, double *ahi,double *alo, double *bhi, double *blo,int m, int n, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; sahi = ahi[0]; salo = alo[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahi,valo,vbhi,vblo,vchi,vclo,vch,vcl; double vzhi[4]={0,0,0,0}; double vzlo[4]={0,0,0,0}; vahi = _mm256_set_pd(ahi[0],0,0,0); valo = _mm256_set_pd(alo[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhi = _mm256_set_pd(bhi[i],0,0,0); vblo = _mm256_set_pd(blo[i],0,0,0); //calc _dd_mul_dd_fma(&vchi,&vclo,vahi,valo,vbhi,vblo); _mm256_storeu_pd( &vzhi[0], vchi ); _mm256_storeu_pd( &vzlo[0], vclo ); chi[i]=vzhi[3]; clo[i]=vzlo[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vchi,vclo,vch,vcl; sahi = ahi[0]; salo = alo[0]; vahi = _mm256_broadcast_sd(&ahi[0]); valo = _mm256_broadcast_sd(&alo[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_mul_dd_avx2(&vchi,&vclo,vahi,valo,vbhi,vblo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vchi,vclo,vch,vcl; sahi = ahi[0]; salo = alo[0]; vahi = _mm256_broadcast_sd(&ahi[0]); valo = _mm256_broadcast_sd(&alo[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_mul_dd_fma(&vchi,&vclo,vahi,valo,vbhi,vblo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); //store chi[i] = schi; clo[i] = sclo; } } break; } } } /** * * @fn _dd_dot_dd(double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the inner product of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
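 *
 * Note: each thread adds its partial sum into chi[0]/clo[0], so the caller
 * is expected to zero the output before the call. A minimal sketch
 * (illustrative values):
 * @code
 * double ahi[4] = {1.0, 2.0, 3.0, 4.0}, alo[4] = {0.0, 0.0, 0.0, 0.0};
 * double bhi[4] = {1.0, 1.0, 1.0, 1.0}, blo[4] = {0.0, 0.0, 0.0, 0.0};
 * double chi = 0.0, clo = 0.0;                       // must start at zero
 * _dd_dot_dd(&chi, &clo, ahi, alo, bhi, blo, 4, 1, 1, 0, 0);
 * @endcode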
 * @param chi,clo output (double-double precision)
 * @param ahi,alo input (double-double precision)
 * @param bhi,blo input (double-double precision)
 * @param m,n size
 * @param omp_threadNum the number of threads
 * @param avx ON/OFF of AVX2
 * @param fma ON/OFF of FMA
 *
 */
void _dd_dot_dd(double *chi, double *clo, double *ahi, double *alo, double *bhi, double *blo, int m, int n, int omp_threadNum, int avx, int fma)
{
	if (omp_threadNum == 0)
		omp_threadNum = omp_get_max_threads();
	int i;
	/* tail count over the flattened m*n elements: the 4-wide vector loop
	   stops at m*n - p and the scalar loop picks up the remaining p */
	int p = (m * n) % 4;
	switch (avx) {
	case 0:
		switch (fma) {
		case 0:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahi, salo, sbhi, sblo, schi, sclo, szhi, szlo;
				double ch[num_threads];
				double cl[num_threads];
				for (i = 0; i < num_threads; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
#pragma omp for
				for (i = 0; i < m * n; i++) {
					//load
					sahi = ahi[i];
					salo = alo[i];
					sbhi = bhi[i];
					sblo = blo[i];
					//calc
					_dd_mul_dd(&schi, &sclo, sahi, salo, sbhi, sblo);
					_dd_add_dd(&szhi, &szlo, ch[tid], cl[tid], schi, sclo);
					//store
					ch[tid] = szhi;
					cl[tid] = szlo;
				}
#pragma omp critical
				{
					//calc
					_dd_add_dd(&schi, &sclo, ch[tid], cl[tid], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		case 1:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double schi, sclo;
				double ch[num_threads * 4];
				double cl[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
				__m256d vahi, valo, vbhi, vblo, vzhi, vzlo;
				__m256d vchi = _mm256_setzero_pd();
				__m256d vclo = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n; i++) {
					//load
					vahi = _mm256_set_pd(ahi[i], 0, 0, 0);
					valo = _mm256_set_pd(alo[i], 0, 0, 0);
					vbhi = _mm256_set_pd(bhi[i], 0, 0, 0);
					vblo = _mm256_set_pd(blo[i], 0, 0, 0);
					//calc
					_dd_mul_dd_fma(&vzhi, &vzlo, vahi, valo, vbhi, vblo);
					_dd_add_dd_avx2(&vchi, &vclo, vzhi, vzlo, vchi, vclo);
				}
				//store
				_mm256_storeu_pd(&ch[4 * tid], vchi);
				_mm256_storeu_pd(&cl[4 * tid], vclo);
#pragma omp critical
				{
					//calc
					_dd_add_dd(&schi, &sclo, ch[4 * tid + 3], cl[4 * tid + 3], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		}
		break;
	case 1:
		switch (fma) {
		case 0:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahi, salo, sbhi, sblo, schi, sclo, szhi, szlo;
				double ch[num_threads * 4];
				double cl[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
				__m256d vahi, valo, vbhi, vblo, vzhi, vzlo;
				__m256d vchi = _mm256_setzero_pd();
				__m256d vclo = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n - p; i += 4) {
					//load
					vahi = _mm256_loadu_pd(&ahi[i]);
					valo = _mm256_loadu_pd(&alo[i]);
					vbhi = _mm256_loadu_pd(&bhi[i]);
					vblo = _mm256_loadu_pd(&blo[i]);
					//calc
					_dd_mul_dd_avx2(&vzhi, &vzlo, vahi, valo, vbhi, vblo);
					_dd_add_dd_avx2(&vchi, &vclo, vzhi, vzlo, vchi, vclo);
				}
				//store
				_mm256_storeu_pd(&ch[4 * tid], vchi);
				_mm256_storeu_pd(&cl[4 * tid], vclo);
#pragma omp for
				for (i = m * n - p; i < m * n; i++) {
					//load
					sahi = ahi[i];
					salo = alo[i];
					sbhi = bhi[i];
					sblo = blo[i];
					//calc
					_dd_mul_dd(&szhi, &szlo, sahi, salo, sbhi, sblo);
					_dd_add_dd(&schi, &sclo, szhi, szlo, ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				for (i = 1; i < 4; i++) {
					//calc
					_dd_add_dd(&schi, &sclo, ch[4 * tid + i], cl[4 * tid + i], ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				{
					//calc
					_dd_add_dd(&schi, &sclo, ch[4 * tid], cl[4 * tid], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		case 1:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahi, salo, sbhi, sblo, schi, sclo, szhi, szlo;
				double ch[num_threads * 4];
				double cl[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
				__m256d vahi, valo, vbhi, vblo, vzhi, vzlo;
				__m256d vchi = _mm256_setzero_pd();
				__m256d vclo = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n - p; i += 4) {
					//load
					vahi = _mm256_loadu_pd(&ahi[i]);
					valo = _mm256_loadu_pd(&alo[i]);
					vbhi = _mm256_loadu_pd(&bhi[i]);
					vblo = _mm256_loadu_pd(&blo[i]);
					//calc
					_dd_mul_dd_fma(&vzhi, &vzlo, vahi, valo, vbhi, vblo);
					_dd_add_dd_avx2(&vchi, &vclo, vzhi, vzlo, vchi, vclo);
				}
				//store
				_mm256_storeu_pd(&ch[4 * tid], vchi);
				_mm256_storeu_pd(&cl[4 * tid], vclo);
#pragma omp for
				for (i = m * n - p; i < m * n; i++) {
					//load
					sahi = ahi[i];
					salo = alo[i];
					sbhi = bhi[i];
					sblo = blo[i];
					//calc
					_dd_mul_dd(&szhi, &szlo, sahi, salo, sbhi, sblo);
					_dd_add_dd(&schi, &sclo, szhi, szlo, ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				for (i = 1; i < 4; i++) {
					//calc
					_dd_add_dd(&schi, &sclo, ch[4 * tid + i], cl[4 * tid + i], ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				{
					//calc
					_dd_add_dd(&schi, &sclo, ch[4 * tid], cl[4 * tid], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		}
	}
}

/**
 *
 * @fn _dd_dot_dd_ieee(double*,double*,double*,double*,double*,double*,int,int,int,int,int)
 * @brief You can compute the inner product of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off.
 * @param chi,clo output (double-double precision)
 * @param ahi,alo input (double-double precision)
 * @param bhi,blo input (double-double precision)
 * @param m,n size
 * @param omp_threadNum the number of threads
 * @param avx ON/OFF of AVX2
 * @param fma ON/OFF of FMA
 *
 */
void _dd_dot_dd_ieee(double *chi, double *clo, double *ahi, double *alo, double *bhi, double *blo, int m, int n, int omp_threadNum, int avx, int fma)
{
	if (omp_threadNum == 0)
		omp_threadNum = omp_get_max_threads();
	int i;
	/* tail count over the flattened m*n elements, as in _dd_dot_dd */
	int p = (m * n) % 4;
	switch (avx) {
	case 0:
		switch (fma) {
		case 0:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahi, salo, sbhi, sblo, schi, sclo, szhi, szlo;
				double ch[num_threads];
				double cl[num_threads];
				for (i = 0; i < num_threads; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
#pragma omp for
				for (i = 0; i < m * n; i++) {
					//load
					sahi = ahi[i];
					salo = alo[i];
					sbhi = bhi[i];
					sblo = blo[i];
					//calc
					_dd_mul_dd(&schi, &sclo, sahi, salo, sbhi, sblo);
					_dd_add_dd_ieee(&szhi, &szlo, ch[tid], cl[tid], schi, sclo);
					//store
					ch[tid] = szhi;
					cl[tid] = szlo;
				}
#pragma omp critical
				{
					//calc
					_dd_add_dd_ieee(&schi, &sclo, ch[tid], cl[tid], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		case 1:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double schi, sclo;
				double ch[num_threads * 4];
				double cl[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
				__m256d vahi, valo, vbhi, vblo, vzhi, vzlo;
				__m256d vchi = _mm256_setzero_pd();
				__m256d vclo = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n; i++) {
					//load
					vahi = _mm256_set_pd(ahi[i], 0, 0, 0);
					valo = _mm256_set_pd(alo[i], 0, 0, 0);
					vbhi = _mm256_set_pd(bhi[i], 0, 0, 0);
					vblo = _mm256_set_pd(blo[i], 0, 0, 0);
					//calc
					_dd_mul_dd_fma(&vzhi, &vzlo, vahi, valo, vbhi, vblo);
					_dd_add_dd_avx2_ieee(&vchi, &vclo, vzhi, vzlo, vchi, vclo);
				}
				//store
				_mm256_storeu_pd(&ch[4 * tid], vchi);
				_mm256_storeu_pd(&cl[4 * tid], vclo);
#pragma omp critical
				{
					//calc
					_dd_add_dd_ieee(&schi, &sclo, ch[4 * tid + 3], cl[4 * tid + 3], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		}
		break;
	case 1:
		switch (fma) {
		case 0:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahi, salo, sbhi, sblo, schi, sclo, szhi, szlo;
				double ch[num_threads * 4];
				double cl[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
				__m256d vahi, valo, vbhi, vblo, vzhi, vzlo;
				__m256d vchi = _mm256_setzero_pd();
				__m256d vclo = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n - p; i += 4) {
					//load
					vahi = _mm256_loadu_pd(&ahi[i]);
					valo = _mm256_loadu_pd(&alo[i]);
					vbhi = _mm256_loadu_pd(&bhi[i]);
					vblo = _mm256_loadu_pd(&blo[i]);
					//calc
					_dd_mul_dd_avx2(&vzhi, &vzlo, vahi, valo, vbhi, vblo);
					_dd_add_dd_avx2_ieee(&vchi, &vclo, vzhi, vzlo, vchi, vclo);
				}
				//store
				_mm256_storeu_pd(&ch[4 * tid], vchi);
				_mm256_storeu_pd(&cl[4 * tid], vclo);
#pragma omp for
				for (i = m * n - p; i < m * n; i++) {
					//load
					sahi = ahi[i];
					salo = alo[i];
					sbhi = bhi[i];
					sblo = blo[i];
					//calc
					_dd_mul_dd(&szhi, &szlo, sahi, salo, sbhi, sblo);
					_dd_add_dd_ieee(&schi, &sclo, szhi, szlo, ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				for (i = 1; i < 4; i++) {
					//calc
					_dd_add_dd_ieee(&schi, &sclo, ch[4 * tid + i], cl[4 * tid + i], ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				{
					//calc
					_dd_add_dd_ieee(&schi, &sclo, ch[4 * tid], cl[4 * tid], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		case 1:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahi, salo, sbhi, sblo, schi, sclo, szhi, szlo;
				double ch[num_threads * 4];
				double cl[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					ch[i] = 0;
					cl[i] = 0;
				}
				__m256d vahi, valo, vbhi, vblo, vzhi, vzlo;
				__m256d vchi = _mm256_setzero_pd();
				__m256d vclo = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n - p; i += 4) {
					//load
					vahi = _mm256_loadu_pd(&ahi[i]);
					valo = _mm256_loadu_pd(&alo[i]);
					vbhi = _mm256_loadu_pd(&bhi[i]);
					vblo = _mm256_loadu_pd(&blo[i]);
					//calc
					_dd_mul_dd_fma(&vzhi, &vzlo, vahi, valo, vbhi, vblo);
					_dd_add_dd_avx2_ieee(&vchi, &vclo, vzhi, vzlo, vchi, vclo);
				}
				//store
				_mm256_storeu_pd(&ch[4 * tid], vchi);
				_mm256_storeu_pd(&cl[4 * tid], vclo);
#pragma omp for
				for (i = m * n - p; i < m * n; i++) {
					//load
					sahi = ahi[i];
					salo = alo[i];
					sbhi = bhi[i];
					sblo = blo[i];
					//calc
					_dd_mul_dd(&szhi, &szlo, sahi, salo, sbhi, sblo);
					_dd_add_dd_ieee(&schi, &sclo, szhi, szlo, ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				for (i = 1; i < 4; i++) {
					//calc
					_dd_add_dd_ieee(&schi, &sclo, ch[4 * tid + i], cl[4 * tid + i], ch[4 * tid], cl[4 * tid]);
					//store
					ch[4 * tid] = schi;
					cl[4 * tid] = sclo;
				}
#pragma omp critical
				{
					//calc
					_dd_add_dd_ieee(&schi, &sclo, ch[4 * tid], cl[4 * tid], chi[0], clo[0]);
					//store
					chi[0] = schi;
					clo[0] = sclo;
				}
			}
			break;
		}
	}
}

/**
 *
 * @fn _dd_mv_dd(double*,double*,double*,double*,double*,double*,int,int,int,int,int,int)
 * @brief You can compute the matrix-vector multiplication of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off.
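 *
 * The matrix is read as a[m*k+i] (column-major, m rows, l columns), and the
 * per-thread partial results are added into chi/clo, so the outputs should
 * be zeroed by the caller. A minimal sketch (illustrative values):
 * @code
 * double ahi[4] = {1.0, 2.0, 3.0, 4.0}, alo[4] = {0.0, 0.0, 0.0, 0.0}; // 2x2, column-major
 * double bhi[2] = {1.0, 1.0}, blo[2] = {0.0, 0.0};
 * double chi[2] = {0.0, 0.0}, clo[2] = {0.0, 0.0};   // accumulated into
 * _dd_mv_dd(chi, clo, ahi, alo, bhi, blo, 2, 1, 2, 1, 0, 0); // m=2, n=1, l=2
 * @endcode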
* @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_mv_dd(double *chi, double *clo, double *ahi, double *alo, double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,salo,sbhi,sblo,schi,sclo; #pragma omp for schedule(static) for (k = 0; k < l; k ++) { //load sbhi = bhi[k]; sblo = blo[k]; for (i = 0; i < m; i ++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i = 0; i < m; i ++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double zhi[4] = {0,0,0,0}; double zlo[4] = {0,0,0,0}; double ch [num_threads * m]; double cl [num_threads * m]; for (k = 0; k < num_threads * m; k ++) { ch[k] = 0; cl[k] = 0; } double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { //load vbhi = _mm256_set_pd(bhi[k],0,0,0); vblo = _mm256_set_pd(blo[k],0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vchi = _mm256_set_pd(ch[m*tid+i],0,0,0); vclo = _mm256_set_pd(cl[m*tid+i],0,0,0); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); ch[m*tid+i]=zhi[3]; cl[m*tid+i]=zlo[3]; } } #pragma omp critical for(i = 0; i < m; i ++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 2: // OMP to inner loop #pragma omp parallel num_threads(omp_threadNum) { int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double zhi[4] = {0,0,0,0}; double zlo[4] = {0,0,0,0}; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; for (k = 0; k < l; k++) { //load vbhi = _mm256_set_pd(bhi[k],0,0,0); vblo = _mm256_set_pd(blo[k],0,0,0); #pragma omp for schedule(static) for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vchi = _mm256_set_pd(chi[i],0,0,0); vclo = _mm256_set_pd(clo[i],0,0,0); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[i]=zhi[3]; clo[i]=zlo[3]; } } } break; } break; case 1: switch(fma){ case -2: #pragma omp parallel num_threads(omp_threadNum) { int k; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } __m256d 
vahi,valo,vbhi,vblo,vchi,vclo,vch,vcl; __m256d t=_mm256_set_pd(0,0,0,0); __m256d t1=_mm256_set_pd(0,0,0,0); __m256d ah=_mm256_set_pd(0,0,0,0); __m256d al=_mm256_set_pd(0,0,0,0); __m256d bh=_mm256_set_pd(0,0,0,0); __m256d bl=_mm256_set_pd(0,0,0,0); __m256d p1=_mm256_set_pd(0,0,0,0); __m256d p2=_mm256_set_pd(0,0,0,0); __m256d chi_n=_mm256_set_pd(0,0,0,0); __m256d clo_n=_mm256_set_pd(0,0,0,0); __m256d cons=_mm256_set_pd(134217729,134217729,134217729,134217729); int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); for (i = 0; i < m; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vch = _mm256_loadu_pd(&ch[m*tid+i]); vcl = _mm256_loadu_pd(&cl[m*tid+i]); //multiplication p1 = _mm256_mul_pd(vahi , vbhi); p2 = _mm256_fmsub_pd(vahi , vbhi, p1); p2 = _mm256_fmadd_pd(vahi , vblo , p2); p2 = _mm256_fmadd_pd(valo , vbhi , p2); chi_n =_mm256_add_pd(p1 , p2); clo_n = _mm256_sub_pd(p2, _mm256_sub_pd(chi_n, p1)); //addition ah = _mm256_add_pd(vch, chi_n); t = _mm256_sub_pd(ah, vch); bh = _mm256_add_pd( _mm256_sub_pd(vch, _mm256_sub_pd(ah , t)), _mm256_sub_pd(chi_n , t)); al = _mm256_add_pd(vcl , clo_n); bh = _mm256_add_pd(bh , al); vch = _mm256_add_pd(ah , bh); vcl = _mm256_sub_pd(bh , _mm256_sub_pd(vch , ah)); _mm256_storeu_pd( &ch[m*tid+i], vch ); _mm256_storeu_pd( &cl[m*tid+i], vcl ); } } #pragma omp critical for(k=0;k<m;k+=4){ vchi = _mm256_loadu_pd(&chi[k]); vclo = _mm256_loadu_pd(&clo[k]); vch = _mm256_loadu_pd(&ch[m*tid+k]); vcl = _mm256_loadu_pd(&cl[m*tid+k]); ah = _mm256_add_pd(vchi, vch); t = _mm256_sub_pd(ah, vchi); bh = _mm256_add_pd( _mm256_sub_pd(vchi, _mm256_sub_pd(ah , t)), _mm256_sub_pd(vch , t)); al = _mm256_add_pd(vclo , vcl); bh = _mm256_add_pd(bh , al); vchi = _mm256_add_pd(ah , bh); vclo = _mm256_sub_pd(bh , _mm256_sub_pd(vchi , ah)); _mm256_storeu_pd( &chi[k], vchi ); _mm256_storeu_pd( &clo[k], vclo ); } } break; case -1: // (vi) #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; double zhi [num_threads*4]; double zlo [num_threads*4]; for (k = 0; k < num_threads * m; k++) { ch[k] = 0; cl[k] = 0; } double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l-p; k+=4) { vbhi = _mm256_loadu_pd(&bhi[k]); vblo = _mm256_loadu_pd(&blo[k]); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*(k+3)+i],ahi[m*(k+2)+i],ahi[m*(k+1)+i],ahi[m*k+i]); valo = _mm256_set_pd(alo[m*(k+3)+i],alo[m*(k+2)+i],alo[m*(k+1)+i],alo[m*k+i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); //store _mm256_storeu_pd( &zhi[tid*4], vzhi ); _mm256_storeu_pd( &zlo[tid*4], vzlo ); schi = zhi[tid]; sclo = zlo[tid]; for(j=1;j<4;j++){ //calc _dd_add_dd(&schi,&sclo,zhi[4*tid+j],zlo[4*tid+j],schi,sclo); } _dd_add_dd(&schi,&sclo,ch[m*tid+i],cl[m*tid+i],schi,sclo); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } for (k = l-p; k < l; k++) { sbhi = bhi[k]; sblo = blo[k]; for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; schi = ch[m*tid+i]; sclo = cl[m*tid+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = 
sclo; } } break; case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads * m; k++) { ch[k] = 0; cl[k] = 0; } double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc _dd_mul_dd_avx2(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; sblo = blo[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; sblo = blo[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 2: // OpenMP to inner loop #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); #pragma omp for schedule(static) for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&chi[i]); vclo = _mm256_loadu_pd(&clo[i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &chi[i], vchi ); _mm256_storeu_pd( &clo[i], vclo ); } sbhi = bhi[k]; sblo = blo[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); 
_dd_add_dd(&schi,&sclo,schi,sclo,chi[i],clo[i]); //store chi[i] = schi; clo[i] = sclo; } } } break; } break; } } /** * * @fn _dd_mv_dd_ieee(double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-vector multiplication of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_mv_dd_ieee(double *chi, double *clo, double *ahi, double *alo, double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,salo,sbhi,sblo,schi,sclo; #pragma omp for schedule(static) for (k = 0; k < l; k ++) { //load sbhi = bhi[k]; sblo = blo[k]; for (i = 0; i < m; i ++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i = 0; i < m; i ++){ _dd_add_dd_ieee(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double zhi[4] = {0,0,0,0}; double zlo[4] = {0,0,0,0}; double ch [num_threads * m]; double cl [num_threads * m]; for (k = 0; k < num_threads * m; k ++) { ch[k] = 0; cl[k] = 0; } double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { //load vbhi = _mm256_set_pd(bhi[k],0,0,0); vblo = _mm256_set_pd(blo[k],0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vchi = _mm256_set_pd(ch[m*tid+i],0,0,0); vclo = _mm256_set_pd(cl[m*tid+i],0,0,0); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); ch[m*tid+i]=zhi[3]; cl[m*tid+i]=zlo[3]; } } #pragma omp critical for(i = 0; i < m; i ++){ _dd_add_dd_ieee(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads * m; k++) { ch[k] = 0; cl[k] = 0; } double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc 
_dd_mul_dd_avx2(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; sblo = blo[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd_ieee(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k, i, j; int num_threads = omp_get_num_threads(); int tid = omp_get_thread_num(); double ch [num_threads*m]; double cl [num_threads*m]; for (k = 0; k < num_threads*m; k++) { ch[k]=0; cl[k]=0; } double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[k]); vblo = _mm256_broadcast_sd(&blo[k]); for (i = 0; i < m-p; i+=4) { //load vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&ch[m*tid+i]); vclo = _mm256_loadu_pd(&cl[m*tid+i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vchi,&vclo,vzhi,vzlo,vchi,vclo); //store _mm256_storeu_pd( &ch[m*tid+i], vchi ); _mm256_storeu_pd( &cl[m*tid+i], vclo ); } sbhi = bhi[k]; sblo = blo[k]; for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,ch[m*tid+i],cl[m*tid+i]); //store ch[m*tid+i] = schi; cl[m*tid+i] = sclo; } } #pragma omp critical for(i=0;i<m;i++){ _dd_add_dd_ieee(&schi,&sclo,chi[i],clo[i],ch[m*tid+i],cl[m*tid+i]); chi[i] = schi; clo[i] = sclo; } } } break; } } /** * * @fn _dd_tmv_dd(double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplication of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
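 *
 * Here each output element chi[k]/clo[k] is assigned (not accumulated): it
 * is the dot product of column k of a with b. A minimal sketch
 * (illustrative values):
 * @code
 * double ahi[4] = {1.0, 2.0, 3.0, 4.0}, alo[4] = {0.0, 0.0, 0.0, 0.0}; // 2x2, column-major
 * double bhi[2] = {1.0, 1.0}, blo[2] = {0.0, 0.0};
 * double chi[2], clo[2];                             // c = A^T * b
 * _dd_tmv_dd(chi, clo, ahi, alo, bhi, blo, 2, 1, 2, 1, 0, 0);
 * @endcode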
* @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_tmv_dd( double *chi, double *clo, double *ahi,double *alo,double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; double sahi,salo,sbhi,sblo,schi,sclo,ch,cl; #pragma omp for schedule(static) for (k = 0; k < l; k++) { ch=0; cl=0; for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&ch,&cl,schi,sclo,ch,cl); } chi[k]=ch; clo[k]=cl; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; __m256d vahi,valo,vbhi,vblo,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vchi = _mm256_set_pd(0,0,0,0); vclo = _mm256_set_pd(0,0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vbhi = _mm256_set_pd(bhi[i],0,0,0); vblo = _mm256_set_pd(blo[i],0,0,0); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); } _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[k]=zhi[3]; clo[k]=zlo[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_mul_dd_avx2(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc 
_dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; } break; } } /** * * @fn _dd_tmv_dd_ieee(double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplication of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_tmv_dd_ieee( double *chi, double *clo, double *ahi,double *alo,double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i; double sahi,salo,sbhi,sblo,schi,sclo,ch,cl; #pragma omp for schedule(static) for (k = 0; k < l; k++) { ch=0; cl=0; for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&ch,&cl,schi,sclo,ch,cl); } chi[k]=ch; clo[k]=cl; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; __m256d vahi,valo,vbhi,vblo,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vchi = _mm256_set_pd(0,0,0,0); vclo = _mm256_set_pd(0,0,0,0); for (i = 0; i < m; i++) { //load vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vbhi = _mm256_set_pd(bhi[i],0,0,0); vblo = _mm256_set_pd(blo[i],0,0,0); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vchi,&vclo,vzhi,vzlo,vchi,vclo); } _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[k]=zhi[3]; clo[k]=zlo[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_mul_dd_avx2(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd_ieee(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m % 4; int k; double ch[4]={0,0,0,0}; double cl[4]={0,0,0,0}; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; int i,j; 
#pragma omp for schedule(static) for (k = 0; k < l; k++) { vch = _mm256_set_pd(0,0,0,0); vcl = _mm256_set_pd(0,0,0,0); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vbhi = _mm256_loadu_pd(&bhi[i]); vblo = _mm256_loadu_pd(&blo[i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vch,&vcl,vzhi,vzlo,vch,vcl); } _mm256_storeu_pd( &ch[0], vch ); _mm256_storeu_pd( &cl[0], vcl ); for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[i]; sblo = blo[i]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,ch[0],cl[0]); //store ch[0] = schi; cl[0] = sclo; } for(i=1;i<4;i++){ _dd_add_dd_ieee(&schi,&sclo,ch[0],cl[0],ch[i],cl[i]); ch[0] = schi; cl[0] = sclo; } chi[k]=ch[0]; clo[k]=cl[0]; } } break; } break; } } /** * * @fn _dd_mm_dd(double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplication of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_mm_dd(double *chi, double *clo, double *ahi, double *alo,double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; int i,j,k; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahi,valo,vbhi,vblo,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; int j,k,i; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_set_pd(bhi[l*j+k],0,0,0); vblo = _mm256_set_pd(blo[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vchi = _mm256_set_pd(chi[m*j+i],0,0,0); vclo = _mm256_set_pd(clo[m*j+i],0,0,0); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[m*j+i]=zhi[3]; clo[m*j+i]=zlo[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); vblo = _mm256_broadcast_sd(&blo[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _dd_mul_dd_avx2(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( 
&chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); vblo = _mm256_broadcast_sd(&blo[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; } break; } } /** * * @fn _dd_mm_dd_ieee(double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplication of double-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chi,clo output (double-double precision) * @param ahi,alo input (double-double precision) * @param bhi,blo input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_mm_dd_ieee(double *chi, double *clo, double *ahi, double *alo,double *bhi, double *blo,int m, int n, int l, int omp_threadNum, int avx, int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int k; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahi,salo,sbhi,sblo,schi,sclo; int i,j,k; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { for (i = 0; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahi,valo,vbhi,vblo,vchi,vclo,vzhi,vzlo; double zhi[4]={0,0,0,0}; double zlo[4]={0,0,0,0}; int j,k,i; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_set_pd(bhi[l*j+k],0,0,0); vblo = _mm256_set_pd(blo[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahi = _mm256_set_pd(ahi[m*k+i],0,0,0); valo = _mm256_set_pd(alo[m*k+i],0,0,0); vchi = _mm256_set_pd(chi[m*j+i],0,0,0); vclo = _mm256_set_pd(clo[m*j+i],0,0,0); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &zhi[0], vchi ); _mm256_storeu_pd( &zlo[0], vclo ); chi[m*j+i]=zhi[3]; clo[m*j+i]=zlo[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double sahi,salo,sbhi,sblo,schi,sclo; __m256d 
vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); vblo = _mm256_broadcast_sd(&blo[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _dd_mul_dd_avx2(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; int p=m%4; double sahi,salo,sbhi,sblo,schi,sclo; __m256d vahi,valo,vbhi,vblo,vzhi,vzlo,vchi,vclo,vch,vcl; #pragma omp for schedule(static) for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhi = _mm256_broadcast_sd(&bhi[l*j+k]); vblo = _mm256_broadcast_sd(&blo[l*j+k]); for (i = 0; i < m-p; i+=4) { vahi = _mm256_loadu_pd(&ahi[m*k+i]); valo = _mm256_loadu_pd(&alo[m*k+i]); vchi = _mm256_loadu_pd(&chi[m*j+i]); vclo = _mm256_loadu_pd(&clo[m*j+i]); //calc _dd_mul_dd_fma(&vzhi,&vzlo,vahi,valo,vbhi,vblo); _dd_add_dd_avx2_ieee(&vchi,&vclo,vzhi,vzlo,vchi,vclo); _mm256_storeu_pd( &chi[m*j+i], vchi ); _mm256_storeu_pd( &clo[m*j+i], vclo ); } for (i = m-p; i < m; i++) { //load sahi = ahi[m*k+i]; salo = alo[m*k+i]; sbhi = bhi[l*j+k]; sblo = blo[l*j+k]; //calc _dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo); _dd_add_dd_ieee(&schi,&sclo,schi,sclo,chi[m*j+i],clo[m*j+i]); //store chi[m*j+i] = schi; clo[m*j+i] = sclo; } } } } break; } break; } } //----------------------------------------------------------------- // Double-Double - Quad-Double operation //----------------------------------------------------------------- /** * * @fn _dd_a_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int) * @brief You can compute the addition of double-double and quad-double numbers in a vector or matrix. You can choose the number of OpenMP threads and AVX2 on / off. 
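 *
 * A minimal usage sketch (illustrative values): a double-double input
 * (two limbs) is added to a quad-double input (four limbs), producing a
 * quad-double result; every output element is written.
 * @code
 * double ahh[2] = {1.0, 1.0}, ahl[2] = {0.0, 0.0};
 * double bhh[2] = {2.0, 2.0}, bhl[2] = {0.0, 0.0};
 * double blh[2] = {0.0, 0.0}, bll[2] = {0.0, 0.0};
 * double chh[2], chl[2], clh[2], cll[2];
 * _dd_a_qd(chh, chl, clh, cll, ahh, ahl, bhh, bhl, blh, bll, 2, 1, 1, 0);
 * @endcode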
 * @param chh,chl,clh,cll output (quad-double precision)
 * @param ahh,ahl input (double-double precision)
 * @param bhh,bhl,blh,bll input (quad-double precision)
 * @param m,n size
 * @param omp_threadNum the number of threads
 * @param avx ON/OFF of AVX2
 *
 */
void _dd_a_qd(double *chh, double *chl, double *clh, double *cll, double *ahh, double *ahl, double *bhh, double *bhl, double *blh, double *bll, int m, int n, int omp_threadNum, int avx)
{
	int i;
	/* tail count over the flattened m*n elements, so the 4-wide vector
	   loop never reads past the end of the arrays */
	int p = (m * n) % 4;
	if (omp_threadNum == 0)
		omp_threadNum = omp_get_max_threads();
	switch (avx) {
	case 0:
#pragma omp parallel num_threads(omp_threadNum)
		{
			double sahh, sahl, sbhh, sbhl, sblh, sbll, schh, schl, sclh, scll;
#pragma omp for
			for (i = 0; i < m * n; i++) {
				//load
				sahh = ahh[i];
				sahl = ahl[i];
				sbhh = bhh[i];
				sbhl = bhl[i];
				sblh = blh[i];
				sbll = bll[i];
				//calc
				_dd_add_qd(&schh, &schl, &sclh, &scll, sahh, sahl, sbhh, sbhl, sblh, sbll);
				//store
				chh[i] = schh;
				chl[i] = schl;
				clh[i] = sclh;
				cll[i] = scll;
			}
		}
		break;
	case 1:
#pragma omp parallel num_threads(omp_threadNum)
		{
			double sahh, sahl, sbhh, sbhl, sblh, sbll, schh, schl, sclh, scll;
			__m256d vahh, vahl, vbhh, vbhl, vblh, vbll, vchh, vchl, vclh, vcll;
#pragma omp for
			for (i = 0; i < m * n - p; i += 4) {
				//load
				vahh = _mm256_loadu_pd(&ahh[i]);
				vahl = _mm256_loadu_pd(&ahl[i]);
				vbhh = _mm256_loadu_pd(&bhh[i]);
				vbhl = _mm256_loadu_pd(&bhl[i]);
				vblh = _mm256_loadu_pd(&blh[i]);
				vbll = _mm256_loadu_pd(&bll[i]);
				//calc
				_dd_add_qd_avx2(&vchh, &vchl, &vclh, &vcll, vahh, vahl, vbhh, vbhl, vblh, vbll);
				//store
				_mm256_storeu_pd(&chh[i], vchh);
				_mm256_storeu_pd(&chl[i], vchl);
				_mm256_storeu_pd(&clh[i], vclh);
				_mm256_storeu_pd(&cll[i], vcll);
			}
#pragma omp for
			for (i = m * n - p; i < m * n; i++) {
				//load
				sahh = ahh[i];
				sahl = ahl[i];
				sbhh = bhh[i];
				sbhl = bhl[i];
				sblh = blh[i];
				sbll = bll[i];
				//calc
				_dd_add_qd(&schh, &schl, &sclh, &scll, sahh, sahl, sbhh, sbhl, sblh, sbll);
				//store
				chh[i] = schh;
				chl[i] = schl;
				clh[i] = sclh;
				cll[i] = scll;
			}
		}
		break;
	}
}

/**
 *
 * @fn _dd_scl_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int)
 * @brief You can compute the scalar multiplication of double-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off.
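 *
 * A minimal usage sketch (illustrative values): the double-double scalar is
 * read from ahh[0]/ahl[0] and multiplied into every quad-double element of b.
 * @code
 * double ahh = 2.0, ahl = 0.0;                       // scalar a
 * double bhh[2] = {1.0, 3.0}, bhl[2] = {0.0, 0.0};
 * double blh[2] = {0.0, 0.0}, bll[2] = {0.0, 0.0};
 * double chh[2], chl[2], clh[2], cll[2];
 * _dd_scl_qd(chh, chl, clh, cll, &ahh, &ahl, bhh, bhl, blh, bll, 2, 1, 1, 0, 0);
 * @endcode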
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl input (double-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _dd_scl_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; sahh = ahh[0]; sahl = ahl[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _dd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; double vzhh[4]={0,0,0,0}; double vzhl[4]={0,0,0,0}; double vzlh[4]={0,0,0,0}; double vzll[4]={0,0,0,0}; vahh = _mm256_set_pd(ahh[0],0,0,0); vahl = _mm256_set_pd(ahl[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &vzhh[0], vchh ); _mm256_storeu_pd( &vzhl[0], vchl ); _mm256_storeu_pd( &vzlh[0], vclh ); _mm256_storeu_pd( &vzll[0], vcll ); chh[i] = vzhh[3]; chl[i] = vzhl[3]; clh[i] = vzlh[3]; cll[i] = vzll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _dd_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vahl,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _dd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma 
omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _dd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; } } } /** * * @fn _dd_dot_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the inner product of double-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl input (double-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _dd_dot_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sahl,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads]; double tchl [num_threads]; double tclh [num_threads]; double tcll [num_threads]; for (i = 0; i < num_threads; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } #pragma omp for for (i = 0; i < m * n; i ++){ //load sahh = ahh[i]; sahl = ahl[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _dd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],schh,schl,sclh,scll); //store tchh[tid] = szhh; tchl[tid] = szhl; tclh[tid] = szlh; tcll[tid] = szll; } #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads*4]; double tchl [num_threads*4]; double tclh [num_threads*4]; double tcll [num_threads*4]; for (i = 0; i < num_threads*4; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll; __m256d vchh = _mm256_setzero_pd(); __m256d vchl = _mm256_setzero_pd(); __m256d vclh = _mm256_setzero_pd(); __m256d vcll = _mm256_setzero_pd(); #pragma omp for for (i = 0; i < m * n; i ++){ //load vahh = _mm256_set_pd(ahh[i],0,0,0); vahl = _mm256_set_pd(ahl[i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } //store _mm256_storeu_pd( &tchh[4*tid], vchh ); _mm256_storeu_pd( &tchl[4*tid], vchl ); _mm256_storeu_pd( &tclh[4*tid], vclh ); _mm256_storeu_pd( &tcll[4*tid], vcll ); #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+3],tchl[4*tid+3],tclh[4*tid+3],tcll[4*tid+3],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } 
			break;
		}
		break;
	case 1:
		switch (fma) {
		case 0:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahh, sahl, sbhh, sbhl, sblh, sbll, szhh, szhl, szlh, szll, schh, schl, sclh, scll;
				double tchh[num_threads * 4];
				double tchl[num_threads * 4];
				double tclh[num_threads * 4];
				double tcll[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					tchh[i] = 0;
					tchl[i] = 0;
					tclh[i] = 0;
					tcll[i] = 0;
				}
				__m256d vahh, vahl, vbhh, vbhl, vblh, vbll, vzhh, vzhl, vzlh, vzll;
				__m256d vchh = _mm256_setzero_pd();
				__m256d vchl = _mm256_setzero_pd();
				__m256d vclh = _mm256_setzero_pd();
				__m256d vcll = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n - p; i += 4) {
					//load
					vahh = _mm256_loadu_pd(&ahh[i]);
					vahl = _mm256_loadu_pd(&ahl[i]);
					vbhh = _mm256_loadu_pd(&bhh[i]);
					vbhl = _mm256_loadu_pd(&bhl[i]);
					vblh = _mm256_loadu_pd(&blh[i]);
					vbll = _mm256_loadu_pd(&bll[i]);
					//calc
					_dd_mul_qd_avx2(&vzhh, &vzhl, &vzlh, &vzll, vahh, vahl, vbhh, vbhl, vblh, vbll);
					_qd_add_qd_avx2(&vchh, &vchl, &vclh, &vcll, vzhh, vzhl, vzlh, vzll, vchh, vchl, vclh, vcll);
				}
				//store
				_mm256_storeu_pd(&tchh[4 * tid], vchh);
				_mm256_storeu_pd(&tchl[4 * tid], vchl);
				_mm256_storeu_pd(&tclh[4 * tid], vclh);
				_mm256_storeu_pd(&tcll[4 * tid], vcll);
#pragma omp for
				for (i = m * n - p; i < m * n; i++) {
					//load
					sahh = ahh[i];
					sahl = ahl[i];
					sbhh = bhh[i];
					sbhl = bhl[i];
					sblh = blh[i];
					sbll = bll[i];
					//calc
					_dd_mul_qd(&schh, &schl, &sclh, &scll, sahh, sahl, sbhh, sbhl, sblh, sbll);
					_qd_add_qd(&szhh, &szhl, &szlh, &szll, tchh[4 * tid], tchl[4 * tid], tclh[4 * tid], tcll[4 * tid], schh, schl, sclh, scll);
					//store
					tchh[4 * tid] = szhh;
					tchl[4 * tid] = szhl;
					tclh[4 * tid] = szlh;
					tcll[4 * tid] = szll;
				}
#pragma omp critical
				for (i = 1; i < 4; i++) {
					//calc
					_qd_add_qd(&schh, &schl, &sclh, &scll, tchh[4 * tid + i], tchl[4 * tid + i], tclh[4 * tid + i], tcll[4 * tid + i], tchh[4 * tid], tchl[4 * tid], tclh[4 * tid], tcll[4 * tid]);
					//store
					tchh[4 * tid] = schh;
					tchl[4 * tid] = schl;
					tclh[4 * tid] = sclh;
					tcll[4 * tid] = scll;
				}
#pragma omp critical
				{
					//calc
					_qd_add_qd(&schh, &schl, &sclh, &scll, tchh[4 * tid], tchl[4 * tid], tclh[4 * tid], tcll[4 * tid], chh[0], chl[0], clh[0], cll[0]);
					//store
					chh[0] = schh;
					chl[0] = schl;
					clh[0] = sclh;
					cll[0] = scll;
				}
			}
			break;
		case 1:
#pragma omp parallel num_threads(omp_threadNum)
			{
				int num_threads = omp_get_num_threads();
				int tid = omp_get_thread_num();
				double sahh, sahl, sbhh, sbhl, sblh, sbll, szhh, szhl, szlh, szll, schh, schl, sclh, scll;
				double tchh[num_threads * 4];
				double tchl[num_threads * 4];
				double tclh[num_threads * 4];
				double tcll[num_threads * 4];
				for (i = 0; i < num_threads * 4; i++) {
					tchh[i] = 0;
					tchl[i] = 0;
					tclh[i] = 0;
					tcll[i] = 0;
				}
				__m256d vahh, vahl, vbhh, vbhl, vblh, vbll, vzhh, vzhl, vzlh, vzll;
				__m256d vchh = _mm256_setzero_pd();
				__m256d vchl = _mm256_setzero_pd();
				__m256d vclh = _mm256_setzero_pd();
				__m256d vcll = _mm256_setzero_pd();
#pragma omp for
				for (i = 0; i < m * n - p; i += 4) {
					//load
					vahh = _mm256_loadu_pd(&ahh[i]);
					vahl = _mm256_loadu_pd(&ahl[i]);
					vbhh = _mm256_loadu_pd(&bhh[i]);
					vbhl = _mm256_loadu_pd(&bhl[i]);
					vblh = _mm256_loadu_pd(&blh[i]);
					vbll = _mm256_loadu_pd(&bll[i]);
					//calc
					_dd_mul_qd_fma(&vzhh, &vzhl, &vzlh, &vzll, vahh, vahl, vbhh, vbhl, vblh, vbll);
					_qd_add_qd_avx2(&vchh, &vchl, &vclh, &vcll, vzhh, vzhl, vzlh, vzll, vchh, vchl, vclh, vcll);
				}
				//store
				_mm256_storeu_pd(&tchh[4 * tid], vchh);
				_mm256_storeu_pd(&tchl[4 * tid], vchl);
				_mm256_storeu_pd(&tclh[4 * tid], vclh);
				_mm256_storeu_pd(&tcll[4 * tid], vcll);
#pragma omp for
				for (i = m * n - p; i < m * n; i++) {
					//load
					sahh = ahh[i];
					sahl = ahl[i];
					sbhh = bhh[i];
					sbhl = bhl[i];
					sblh = blh[i];
					sbll = bll[i];
					//calc
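					/* scalar tail: the elements left over after the 4-wide
					   vector loop are reduced with the plain scalar kernels */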
_dd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],schh,schl,sclh,scll); //store tchh[4*tid] = szhh; tchl[4*tid] = szhl; tclh[4*tid] = szlh; tcll[4*tid] = szll; } #pragma omp critical for(i=1;i<4;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+i],tchl[4*tid+i],tclh[4*tid+i],tcll[4*tid+i],tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid]); //store tchh[4*tid] = schh; tchl[4*tid] = schl; tclh[4*tid] = sclh; tcll[4*tid] = scll; } #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; } } } /** * * @fn _dd_mv_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-vector multiplicaion of double-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl input (double-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _dd_mv_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double vte1[4]={0,0,0,0}; double vte2[4]={0,0,0,0}; double vte3[4]={0,0,0,0}; double vte4[4]={0,0,0,0}; double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double schh,schl,sclh,scll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[k],0,0,0); vbhl = _mm256_set_pd(bhl[k],0,0,0); vblh = _mm256_set_pd(blh[k],0,0,0); vbll = _mm256_set_pd(bll[k],0,0,0); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = 
_mm256_set_pd(ahl[m*k+i],0,0,0); vzhh = _mm256_set_pd(tchh[m*tid+i],0,0,0); vzhl = _mm256_set_pd(tchl[m*tid+i],0,0,0); vzlh = _mm256_set_pd(tclh[m*tid+i],0,0,0); vzll = _mm256_set_pd(tcll[m*tid+i],0,0,0); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &vte1[0], vchh ); _mm256_storeu_pd( &vte2[0], vchl ); _mm256_storeu_pd( &vte3[0], vclh ); _mm256_storeu_pd( &vte4[0], vcll ); tchh[m*tid+i]=vte1[3]; tchl[m*tid+i]=vte2[3]; tclh[m*tid+i]=vte3[3]; tcll[m*tid+i]=vte4[3]; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); vblh = _mm256_broadcast_sd(&blh[k]); vbll = _mm256_broadcast_sd(&bll[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _dd_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); vblh = _mm256_broadcast_sd(&blh[k]); vbll = 
_mm256_broadcast_sd(&bll[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; } } /** * * @fn _dd_tmv_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplicaion of double-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl input (double-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _dd_tmv_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double schh,schl,sclh,scll,sahh,sahl,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { schh=0; schl=0; sclh=0; scll=0; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,schh,schl,sclh,scll); //store schh = szhh; schl = szhl; sclh = szlh; scll = szll; } chh[k] = schh; chl[k] = schl; clh[k] = sclh; cll[k] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,vbhh,vbhl,vblh,vbll); 
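// _dd_tmv_qd computes c[k] = sum over i of A[m*k+i] * b[i] (transposed
// matrix-vector product, column-major A). Each iteration forms one
// quad-double product z = A[m*k+i] * b[i] and folds it into the running
// quad-double accumulator (vchh..vcll), which stays in registers for the
// whole column and is only stored once per k.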
_qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); chh[k] = tchh[3]; chl[k] = tchl[3]; clh[k] = tclh[3]; cll[k] = tcll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sahl,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _dd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sahl,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc 
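// Horizontal reduction: the four independent lane partials tchh[0..3] (and
// likewise tchl, tclh, tcll) are summed into element 0 one lane at a time
// with the scalar quad-double add. The order of these adds only affects the
// result at the level of rounding, since quad-double addition, like ordinary
// floating point, is not exactly associative.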
_qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; } break; } } /** * * @fn _dd_mm_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplicaion of double-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl input (double-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _dd_mm_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; double szhh[4]={0,0,0,0}; double szhl[4]={0,0,0,0}; double szlh[4]={0,0,0,0}; double szll[4]={0,0,0,0}; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[l*j+k],0,0,0); vbhl = _mm256_set_pd(bhl[l*j+k],0,0,0); vblh = _mm256_set_pd(blh[l*j+k],0,0,0); vbll = _mm256_set_pd(bll[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); vchh = _mm256_set_pd(chh[m*j+i],0,0,0); vchl = _mm256_set_pd(chl[m*j+i],0,0,0); vclh = _mm256_set_pd(clh[m*j+i],0,0,0); vcll = _mm256_set_pd(cll[m*j+i],0,0,0); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); _mm256_storeu_pd( &szhh[0], vchh ); _mm256_storeu_pd( &szhl[0], vchl ); _mm256_storeu_pd( &szlh[0], vclh ); _mm256_storeu_pd( &szll[0], vcll ); chh[m*j+i] = szhh[3]; chl[m*j+i] = szhl[3]; clh[m*j+i] = szlh[3]; cll[m*j+i] = szll[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); vblh = _mm256_broadcast_sd(&blh[l*j+k]); vbll = _mm256_broadcast_sd(&bll[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); 
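// The output is accumulated in place: the current 4-wide block of the C
// column is loaded, the new products are added in quad-double, and the block
// is stored back, i.e. C[m*j+i .. m*j+i+3] += A[m*k+i .. m*k+i+3] * B[l*j+k].
// The j-k-i loop order keeps B[l*j+k] fixed in a broadcast register across a
// full column of A.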
vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _dd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sahl,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); vblh = _mm256_broadcast_sd(&blh[l*j+k]); vbll = _mm256_broadcast_sd(&bll[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; } break; } } //----------------------------------------------------------------- // Quad-Double - Double operation //----------------------------------------------------------------- /** * * @fn _qd_scl_d(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the scaler of quad-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _qd_scl_d(double* chh,double* chl,double* clh,double* cll,double *ahh, double *ahl,double *alh,double *all, double *bhh,int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,schh,schl,sclh,scll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhh = bhh[i]; //calc _d_mul_qd(&schh,&schl,&sclh,&scll,sbhh,sahh,sahl,salh,sall); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahh,vahl,valh,vall,vbhh,vchh,vchl,vclh,vcll; double vzhh[4]={0,0,0,0}; double vzhl[4]={0,0,0,0}; double vzlh[4]={0,0,0,0}; double vzll[4]={0,0,0,0}; vahh = _mm256_set_pd(ahh[0],0,0,0); vahl = _mm256_set_pd(ahl[0],0,0,0); valh = _mm256_set_pd(alh[0],0,0,0); vall = _mm256_set_pd(all[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhh = _mm256_set_pd(bhh[i],0,0,0); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vahh,vahl,valh,vall); //store _mm256_storeu_pd( &vzhh[0], vchh ); _mm256_storeu_pd( &vzhl[0], vchl ); _mm256_storeu_pd( &vzlh[0], vclh ); _mm256_storeu_pd( &vzll[0], vcll ); chh[i] = vzhh[3]; chl[i] = vzhl[3]; clh[i] = vzlh[3]; cll[i] = vzll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,schh,schl,sclh,scll; __m256d vahh,vahl,valh,vall,vbhh,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); valh = _mm256_broadcast_sd(&alh[0]); vall = _mm256_broadcast_sd(&all[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); //calc _d_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vbhh,vahh,vahl,valh,vall); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; //calc _d_mul_qd(&schh,&schl,&sclh,&scll,sbhh,sahh,sahl,salh,sall); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sbhh,sahl,salh,sall,schh,schl,sclh,scll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); valh = _mm256_broadcast_sd(&alh[0]); vall = _mm256_broadcast_sd(&all[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vahh,vahl,valh,vall); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; //calc _d_mul_qd(&schh,&schl,&sclh,&scll,sbhh,sahh,sahl,salh,sall); //store chh[i] = schh; chl[i] = 
schl; clh[i] = sclh; cll[i] = scll; } } break; } } } /** * * @fn _qd_mv_d(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-vector multiplicaion of quad-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_mv_d(double* chh,double* chl,double* clh,double* cll,double *bhh,double *ahh,double *ahl,double *alh,double *all, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sbhh,sahl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { sbhh = bhh[k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double vte1[4]={0,0,0,0}; double vte2[4]={0,0,0,0}; double vte3[4]={0,0,0,0}; double vte4[4]={0,0,0,0}; double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double schh,schl,sclh,scll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[k],0,0,0); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vzhh = _mm256_set_pd(tchh[m*tid+i],0,0,0); vzhl = _mm256_set_pd(tchl[m*tid+i],0,0,0); vzlh = _mm256_set_pd(tclh[m*tid+i],0,0,0); vzll = _mm256_set_pd(tcll[m*tid+i],0,0,0); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &vte1[0], vchh ); _mm256_storeu_pd( &vte2[0], vchl ); _mm256_storeu_pd( &vte3[0], vclh ); _mm256_storeu_pd( &vte4[0], vcll ); tchh[m*tid+i]=vte1[3]; tchl[m*tid+i]=vte2[3]; tclh[m*tid+i]=vte3[3]; tcll[m*tid+i]=vte4[3]; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; 
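// Each thread only ever touches the slice tchh[m*tid .. m*tid+m-1] (and the
// matching tchl/tclh/tcll slices) of its private scratch array, and this
// critical section serializes the final accumulation of all per-thread
// slices into the shared output vector c[0..m-1].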
chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sbhh,sahl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _d_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sbhh,sahl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _d_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); 
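// In the AVX2 paths the double scalar b[k] is broadcast across all four
// lanes with _mm256_broadcast_sd, so a single register multiplies four
// consecutive rows of the quad-double matrix per iteration; the rows left
// over when m is not a multiple of 4 (p = m % 4 of them) fall through to
// this scalar loop.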
_qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; } } /** * * @fn _qd_tmv_d(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplicaion of quad-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_tmv_d(double* chh,double* chl,double* clh,double* cll,double *bhh,double *ahh,double *ahl,double *alh,double *all, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double schh,schl,sclh,scll,sahh,sbhh,sahl,salh,sall,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { schh=0; schl=0; sclh=0; scll=0; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,schh,schl,sclh,scll); //store schh = szhh; schl = szhl; sclh = szlh; scll = szll; } chh[k] = schh; chl[k] = schl; clh[k] = sclh; cll[k] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); chh[k] = tchh[3]; chl[k] = tchl[3]; clh[k] = tclh[3]; cll[k] = tcll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sbhh,sahl,salh,sall,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = 
_mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); //calc _d_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sbhh,sahl,salh,sall,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; } break; } } /** * * @fn _qd_mm_d(double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplicaion of quad-double and double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh input (double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_mm_d(double* chh,double* chl,double* clh,double* cll,double *bhh,double *ahh,double *ahl,double *alh,double *all, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; double sahh,sbhh,sahl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { sbhh = bhh[l*j+k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; double szhh[4]={0,0,0,0}; double szhl[4]={0,0,0,0}; double szlh[4]={0,0,0,0}; double szll[4]={0,0,0,0}; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vchh = _mm256_set_pd(chh[m*j+i],0,0,0); vchl = _mm256_set_pd(chl[m*j+i],0,0,0); vclh = _mm256_set_pd(clh[m*j+i],0,0,0); vcll = _mm256_set_pd(cll[m*j+i],0,0,0); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); _mm256_storeu_pd( &szhh[0], vchh ); _mm256_storeu_pd( &szhl[0], vchl ); _mm256_storeu_pd( &szlh[0], vclh ); _mm256_storeu_pd( &szll[0], vcll ); chh[m*j+i] = szhh[3]; chl[m*j+i] = szhl[3]; clh[m*j+i] = szlh[3]; cll[m*j+i] = szll[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sbhh,sahl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _d_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); 
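// The case 1 branch below differs only in swapping _d_mul_qd_avx2 for
// _d_mul_qd_fma. A plausible reading (the kernel internals are not shown in
// this file, so this is an assumption): the _fma variants obtain the exact
// rounding error of a product with one fused multiply-add instead of
// Dekker-style splitting, e.g.
//     double p = a * b;
//     double e = fma(a, b, -p);   /* exact error of a*b, barring overflow */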
//store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sbhh,sahl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vbhh,vahl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _d_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _d_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; } break; } } //----------------------------------------------------------------- // Quad-Double - Double-Double operation //----------------------------------------------------------------- /** * * @fn _qd_scl_dd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the scaler of double-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _qd_scl_dd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; //calc _dd_mul_qd(&schh,&schl,&sclh,&scll,sbhh,sbhl,sahh,sahl,salh,sall); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll; double vzhh[4]={0,0,0,0}; double vzhl[4]={0,0,0,0}; double vzlh[4]={0,0,0,0}; double vzll[4]={0,0,0,0}; vahh = _mm256_set_pd(ahh[0],0,0,0); vahl = _mm256_set_pd(ahl[0],0,0,0); valh = _mm256_set_pd(alh[0],0,0,0); vall = _mm256_set_pd(all[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vbhl,vahh,vahl,valh,vall); //store _mm256_storeu_pd( &vzhh[0], vchh ); _mm256_storeu_pd( &vzhl[0], vchl ); _mm256_storeu_pd( &vzlh[0], vclh ); _mm256_storeu_pd( &vzll[0], vcll ); chh[i] = vzhh[3]; chl[i] = vzhl[3]; clh[i] = vzlh[3]; cll[i] = vzll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); valh = _mm256_broadcast_sd(&alh[0]); vall = _mm256_broadcast_sd(&all[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); //calc _dd_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vbhh,vbhl,vahh,vahl,valh,vall); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; //calc _dd_mul_qd(&schh,&schl,&sclh,&scll,sbhh,sbhl,sahh,sahl,salh,sall); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); valh = _mm256_broadcast_sd(&alh[0]); vall = _mm256_broadcast_sd(&all[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vbhl,vahh,vahl,valh,vall); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); 
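// Unaligned loads/stores (_mm256_loadu_pd / _mm256_storeu_pd) are used
// throughout, so callers need not 32-byte-align their arrays; on most
// AVX2-capable cores the penalty is small when the data happens to be
// aligned anyway.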
_mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; //calc _dd_mul_qd(&schh,&schl,&sclh,&scll,sbhh,sbhl,sahh,sahl,salh,sall); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; } } } /** * * @fn _qd_mv_dd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-vector multiplicaion of quad-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_mv_dd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { sbhh = bhh[k]; sbhl = bhl[k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double vte1[4]={0,0,0,0}; double vte2[4]={0,0,0,0}; double vte3[4]={0,0,0,0}; double vte4[4]={0,0,0,0}; double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double schh,schl,sclh,scll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[k],0,0,0); vbhl = _mm256_set_pd(bhl[k],0,0,0); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vzhh = _mm256_set_pd(tchh[m*tid+i],0,0,0); vzhl = _mm256_set_pd(tchl[m*tid+i],0,0,0); vzlh = _mm256_set_pd(tclh[m*tid+i],0,0,0); vzll = _mm256_set_pd(tcll[m*tid+i],0,0,0); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &vte1[0], vchh ); _mm256_storeu_pd( &vte2[0], vchl ); _mm256_storeu_pd( 
&vte3[0], vclh ); _mm256_storeu_pd( &vte4[0], vcll ); tchh[m*tid+i]=vte1[3]; tchl[m*tid+i]=vte2[3]; tclh[m*tid+i]=vte3[3]; tcll[m*tid+i]=vte4[3]; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _dd_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _dd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store 
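// Unlike the dot product, the running partials here live in the per-thread
// scratch arrays rather than in registers: every 4-wide step reloads
// tchh[m*tid+i ..] (and the matching tchl/tclh/tcll slices), adds the new
// quad-double products, and writes the slice straight back below.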
_mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; } } /** * * @fn _qd_tmv_dd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplicaion of quad-double and double-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_tmv_dd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double schh,schl,sclh,scll,sahh,sahl,sbhh,sbhl,salh,sall,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { schh=0; schl=0; sclh=0; scll=0; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,schh,schl,sclh,scll); //store schh = szhh; schl = szhl; sclh = szlh; scll = szll; } chh[k] = schh; chl[k] = schl; clh[k] = sclh; cll[k] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); chh[k] = tchh[3]; chl[k] = tchl[3]; clh[k] = tclh[3]; cll[k] = tcll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double 
tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sahl,sbhh,sbhl,salh,sall,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); //calc _dd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sahl,sbhh,sbhl,salh,sall,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; } break; } } /** * * @fn _qd_mm_dd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplicaion of quad-double and double-double numbers. 
You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl input (double-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_mm_dd(double* chh,double* chl,double* clh,double* cll,double *bhh,double *bhl, double *ahh,double *ahl,double *alh,double *all,int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; double szhh[4]={0,0,0,0}; double szhl[4]={0,0,0,0}; double szlh[4]={0,0,0,0}; double szll[4]={0,0,0,0}; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[l*j+k],0,0,0); vbhl = _mm256_set_pd(bhl[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vchh = _mm256_set_pd(chh[m*j+i],0,0,0); vchl = _mm256_set_pd(chl[m*j+i],0,0,0); vclh = _mm256_set_pd(clh[m*j+i],0,0,0); vcll = _mm256_set_pd(cll[m*j+i],0,0,0); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); _mm256_storeu_pd( &szhh[0], vchh ); _mm256_storeu_pd( &szhl[0], vchl ); _mm256_storeu_pd( &szlh[0], vclh ); _mm256_storeu_pd( &szll[0], vcll ); chh[m*j+i] = szhh[3]; chl[m*j+i] = szhl[3]; clh[m*j+i] = szlh[3]; cll[m*j+i] = szll[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _dd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; for (i = m-p; i < m; i++) { 
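/* Scalar tail: the last m%4 rows do not fill a 4-wide AVX register. */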
//load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sahl,sbhh,sbhl,salh,sall,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,vbhh,vbhl,valh,vall,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _dd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vbhh,vbhl,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _dd_mul_qd(&szhh,&szhl,&szlh,&szll,sbhh,sbhl,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; } break; } } //----------------------------------------------------------------- // Quad-Double - Quad-Double operation //----------------------------------------------------------------- /** * * @fn _qd_a_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int) * @brief You can compute the addition of quad-double and quad-double numbers in a vector or matrix. You can choose the number of OpenMP threads and AVX2 on / off. 
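*
* A minimal call sketch (illustrative; the operand names and the size N are
* hypothetical caller buffers, one array of m*n doubles per qd component):
* @code
* // c = a + b, elementwise over N quad-double values
* _qd_a_qd(chh,chl,clh,cll, ahh,ahl,alh,all, bhh,bhl,blh,bll,
*          N, 1, 0, 1); // m=N, n=1, all available threads, AVX2 path
* @endcode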
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_a_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx) { int i; int p = m % 4; if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); switch(avx){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; #pragma omp for for (i = 0; i < m * n; i ++){ //load sahh = ahh[i]; sahl = ahl[i]; salh = alh[i]; sall = all[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_add_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; #pragma omp for for (i = 0; i < m * n - p; i += 4){ //load vahh = _mm256_loadu_pd(&ahh[i]); vahl = _mm256_loadu_pd(&ahl[i]); valh = _mm256_loadu_pd(&alh[i]); vall = _mm256_loadu_pd(&all[i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sahh = ahh[i]; sahl = ahl[i]; salh = alh[i]; sall = all[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_add_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; } } /** * * @fn _qd_scl_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the scaler of quad-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
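*
* Only element 0 of the a-arrays is read, i.e. every element of b is scaled
* by the quad-double scalar (ahh[0],ahl[0],alh[0],all[0]). A call sketch
* (buffer names illustrative):
* @code
* _qd_scl_qd(chh,chl,clh,cll, ahh,ahl,alh,all, bhh,bhl,blh,bll,
*            N, 1, 0, 1, 1); // c[i] = a * b[i], AVX2 + FMA path
* @endcode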
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * @param fma ON/OFF of FMA * */ void _qd_scl_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; #pragma omp for for (i = 0; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; double vzhh[4]={0,0,0,0}; double vzhl[4]={0,0,0,0}; double vzlh[4]={0,0,0,0}; double vzll[4]={0,0,0,0}; vahh = _mm256_set_pd(ahh[0],0,0,0); vahl = _mm256_set_pd(ahl[0],0,0,0); valh = _mm256_set_pd(alh[0],0,0,0); vall = _mm256_set_pd(all[0],0,0,0); #pragma omp for for (i = 0; i < m * n; i ++){ //load vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _qd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &vzhh[0], vchh ); _mm256_storeu_pd( &vzhl[0], vchl ); _mm256_storeu_pd( &vzlh[0], vclh ); _mm256_storeu_pd( &vzll[0], vcll ); chh[i] = vzhh[3]; chl[i] = vzhl[3]; clh[i] = vzlh[3]; cll[i] = vzll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); valh = _mm256_broadcast_sd(&alh[0]); vall = _mm256_broadcast_sd(&all[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _qd_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll; sahh = ahh[0]; sahl = ahl[0]; salh = alh[0]; sall = all[0]; vahh = _mm256_broadcast_sd(&ahh[0]); vahl = _mm256_broadcast_sd(&ahl[0]); valh = _mm256_broadcast_sd(&alh[0]); vall = _mm256_broadcast_sd(&all[0]); 
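/* The quad-double scalar a is kept both as scalars (for the tail loop)
   and broadcast across the four AVX2 lanes (for the main loop). */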
#pragma omp for schedule(static)
for (i = 0; i < m * n - p; i += 4) {
	//load
	vbhh = _mm256_loadu_pd(&bhh[i]);
	vbhl = _mm256_loadu_pd(&bhl[i]);
	vblh = _mm256_loadu_pd(&blh[i]);
	vbll = _mm256_loadu_pd(&bll[i]);

	//calc
	_qd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll);

	//store
	_mm256_storeu_pd( &chh[i], vchh );
	_mm256_storeu_pd( &chl[i], vchl );
	_mm256_storeu_pd( &clh[i], vclh );
	_mm256_storeu_pd( &cll[i], vcll );
}
#pragma omp for
for (i = m * n - p; i < m * n; i ++){
	//load
	sbhh = bhh[i];
	sbhl = bhl[i];
	sblh = blh[i];
	sbll = bll[i];

	//calc
	_qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll);

	//store
	chh[i] = schh;
	chl[i] = schl;
	clh[i] = sclh;
	cll[i] = scll;
}
}
break;
}
}
}

/**
*
* @fn _qd_axpy_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int)
* @brief You can compute the axpy of quad-double and quad-double numbers (c = x * a + b, where x is a quad-double scalar). You can choose the number of OpenMP threads and FMA, and AVX2 on / off.
* @param chh,chl,clh,cll output (quad-double precision)
* @param xhh,xhl,xlh,xll input scalar (quad-double precision; only element 0 is read)
* @param ahh,ahl,alh,all input (quad-double precision)
* @param bhh,bhl,blh,bll input (quad-double precision)
* @param m,n size
* @param omp_threadNum the number of threads
* @param avx ON/OFF of AVX2
* @param fma ON/OFF of FMA
*
*/
void _qd_axpy_qd(double *chh, double *chl,double *clh, double *cll,double* xhh, double* xhl, double* xlh, double* xll, double *ahh,double *ahl, double *alh,double *all,double *bhh, double *bhl,double *blh, double *bll,int m, int n, int omp_threadNum, int avx, int fma)
{
	if(omp_threadNum == 0){
		omp_threadNum = omp_get_max_threads();
	}
	int i;
	int p = m % 4;
	switch(avx){
		case 0:
			switch(fma){
				case 0:
//					#pragma omp parallel num_threads(omp_threadNum)
//					{
//						double sahi,salo,sbhi,sblo,schi,sclo;
//						sahi = ahi[0];
//						salo = alo[0];
//						#pragma omp for
//						for (i = 0; i < m * n; i ++){
//							//load
//							sbhi = bhi[i];
//							sblo = blo[i];
//
//							//calc
//							_dd_mul_dd(&schi,&sclo,sahi,salo,sbhi,sblo);
//
//							//store
//							chi[i] = schi;
//							clo[i] = sclo;
//						}
//					}
					break;
				case 1:
					#pragma omp parallel num_threads(omp_threadNum)
					{
						__m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vxhh,vxhl,vxlh,vxll;
						double vzhh[4]={0,0,0,0};
						double vzhl[4]={0,0,0,0};
						double vzlh[4]={0,0,0,0};
						double vzll[4]={0,0,0,0};
						vxhh = _mm256_set_pd(xhh[0],0,0,0);
						vxhl = _mm256_set_pd(xhl[0],0,0,0);
						vxlh = _mm256_set_pd(xlh[0],0,0,0);
						vxll = _mm256_set_pd(xll[0],0,0,0);
						#pragma omp for
						for (i = 0; i < m * n; i ++){
							//load
							vahh = _mm256_set_pd(ahh[i],0,0,0);
							vahl = _mm256_set_pd(ahl[i],0,0,0);
							valh = _mm256_set_pd(alh[i],0,0,0);
							vall = _mm256_set_pd(all[i],0,0,0);
							vbhh = _mm256_set_pd(bhh[i],0,0,0);
							vbhl = _mm256_set_pd(bhl[i],0,0,0);
							vblh = _mm256_set_pd(blh[i],0,0,0);
							vbll = _mm256_set_pd(bll[i],0,0,0);

							//calc
							_qd_mul_qd_fma(&vahh,&vahl,&valh,&vall,vxhh,vxhl,vxlh,vxll,vahh,vahl,valh,vall);
							_qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll);

							//store (spill to the temporaries; only lane 3 carries data)
							_mm256_storeu_pd( &vzhh[0], vchh );
							_mm256_storeu_pd( &vzhl[0], vchl );
							_mm256_storeu_pd( &vzlh[0], vclh );
							_mm256_storeu_pd( &vzll[0], vcll );
							chh[i]=vzhh[3];
							chl[i]=vzhl[3];
							clh[i]=vzlh[3];
							cll[i]=vzll[3];
						}
					}
					break;
			}
			break;
		case 1:
			switch(fma){
				case 0:
//					#pragma omp parallel num_threads(omp_threadNum)
//					{
//						double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,sxhh,sxhl,sxlh,sxll;
//						__m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vxhh,vxhl,vxlh,vxll;
//						sxhi = xhi[0];
//						sxlo = xlo[0];
//						vxhi = _mm256_broadcast_sd(&xhi[0]);
//						vxlo = _mm256_broadcast_sd(&xlo[0]);
//
#pragma omp for schedule(static) // for (i = 0; i < m * n - p; i += 4) { // //load // vahi = _mm256_loadu_pd(&ahi[i]); // valo = _mm256_loadu_pd(&alo[i]); // vbhi = _mm256_loadu_pd(&bhi[i]); // vblo = _mm256_loadu_pd(&blo[i]); // // //calc // _dd_mul_dd_avx2(&vahi,&valo,vxhi,vxlo,vahi,valo); // _dd_add_dd_avx2(&vchi,&vclo,vahi,valo,vbhi,vblo); // // //store // _mm256_storeu_pd( &chi[i], vchi ); // _mm256_storeu_pd( &clo[i], vclo ); // // } // #pragma omp for // for (i = m * n - p; i < m * n; i ++){ // //load // sahi = ahi[i]; // salo = alo[i]; // sbhi = bhi[i]; // sblo = blo[i]; // // //calc // _dd_mul_dd(&sahi,&salo,sxhi,sxlo,sahi,salo); // _dd_add_dd(&schi,&sclo,sahi,salo,sbhi,sblo); // // //store // chi[i] = schi; // clo[i] = sclo; // } // } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,sxhh,sxhl,sxlh,sxll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vxhh,vxhl,vxlh,vxll; sxhh = xhh[0]; sxhl = xhl[0]; sxlh = xlh[0]; sxll = xll[0]; vxhh = _mm256_broadcast_sd(&xhh[0]); vxhl = _mm256_broadcast_sd(&xhl[0]); vxlh = _mm256_broadcast_sd(&xlh[0]); vxll = _mm256_broadcast_sd(&xll[0]); #pragma omp for schedule(static) for (i = 0; i < m * n - p; i += 4) { //load vahh = _mm256_loadu_pd(&ahh[i]); vahl = _mm256_loadu_pd(&ahl[i]); valh = _mm256_loadu_pd(&alh[i]); vall = _mm256_loadu_pd(&all[i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _qd_mul_qd_fma(&vahh,&vahl,&valh,&vall,vxhh,vxhl,vxlh,vxll,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); //store _mm256_storeu_pd( &chh[i], vchh ); _mm256_storeu_pd( &chl[i], vchl ); _mm256_storeu_pd( &clh[i], vclh ); _mm256_storeu_pd( &cll[i], vcll ); } #pragma omp for for (i = m * n - p; i < m * n; i ++){ //load sahh = ahh[i]; sahl = ahl[i]; salh = alh[i]; sall = all[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_mul_qd(&sahh,&sahl,&salh,&sall,sxhh,sxhl,sxlh,sxll,sahh,sahl,salh,sall); _qd_add_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); //store chh[i] = schh; chl[i] = schl; clh[i] = sclh; cll[i] = scll; } } break; } } } /** * * @fn _qd_dot_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the inner product of quad-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
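*
* The per-thread partial sums are added into the existing contents of
* (chh[0],chl[0],clh[0],cll[0]), so zero the output before the call.
* Sketch (buffer names illustrative):
* @code
* chh[0] = chl[0] = clh[0] = cll[0] = 0.0;
* _qd_dot_qd(chh,chl,clh,cll, ahh,ahl,alh,all, bhh,bhl,blh,bll,
*            N, 1, 0, 1, 1); // c = sum_i a[i] * b[i]
* @endcode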
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_dot_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads]; double tchl [num_threads]; double tclh [num_threads]; double tcll [num_threads]; for (i = 0; i < num_threads; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } #pragma omp for for (i = 0; i < m * n; i ++){ //load sahh = ahh[i]; sahl = ahl[i]; salh = alh[i]; sall = all[i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],schh,schl,sclh,scll); //store tchh[tid] = szhh; tchl[tid] = szhl; tclh[tid] = szlh; tcll[tid] = szll; } #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads*4]; double tchl [num_threads*4]; double tclh [num_threads*4]; double tcll [num_threads*4]; for (i = 0; i < num_threads*4; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll; __m256d vchh = _mm256_setzero_pd(); __m256d vchl = _mm256_setzero_pd(); __m256d vclh = _mm256_setzero_pd(); __m256d vcll = _mm256_setzero_pd(); #pragma omp for for (i = 0; i < m * n; i ++){ //load vahh = _mm256_set_pd(ahh[i],0,0,0); vahl = _mm256_set_pd(ahl[i],0,0,0); valh = _mm256_set_pd(alh[i],0,0,0); vall = _mm256_set_pd(all[i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } //store _mm256_storeu_pd( &tchh[4*tid], vchh ); _mm256_storeu_pd( &tchl[4*tid], vchl ); _mm256_storeu_pd( &tclh[4*tid], vclh ); _mm256_storeu_pd( &tcll[4*tid], vcll ); #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+3],tchl[4*tid+3],tclh[4*tid+3],tcll[4*tid+3],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads*4]; double tchl [num_threads*4]; double tclh [num_threads*4]; double tcll [num_threads*4]; 
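/* Each thread owns a 4-lane quad-double accumulator at tc*[4*tid .. 4*tid+3];
   the lanes are reduced to a single sum after the loops. */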
for (i = 0; i < num_threads*4; i++) {
	tchh[i]=0;
	tchl[i]=0;
	tclh[i]=0;
	tcll[i]=0;
}
__m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll;
__m256d vchh = _mm256_setzero_pd();
__m256d vchl = _mm256_setzero_pd();
__m256d vclh = _mm256_setzero_pd();
__m256d vcll = _mm256_setzero_pd();
#pragma omp for
for (i = 0; i < m * n - p; i += 4){
	//load
	vahh = _mm256_loadu_pd(&ahh[i]);
	vahl = _mm256_loadu_pd(&ahl[i]);
	valh = _mm256_loadu_pd(&alh[i]);
	vall = _mm256_loadu_pd(&all[i]);
	vbhh = _mm256_loadu_pd(&bhh[i]);
	vbhl = _mm256_loadu_pd(&bhl[i]);
	vblh = _mm256_loadu_pd(&blh[i]);
	vbll = _mm256_loadu_pd(&bll[i]);

	//calc
	_qd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll);
	_qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll);
}
//store
_mm256_storeu_pd( &tchh[4*tid], vchh );
_mm256_storeu_pd( &tchl[4*tid], vchl );
_mm256_storeu_pd( &tclh[4*tid], vclh );
_mm256_storeu_pd( &tcll[4*tid], vcll );
#pragma omp for
for (i = m * n - p; i < m * n; i++) {
	//load
	sahh = ahh[i];
	sahl = ahl[i];
	salh = alh[i];
	sall = all[i];
	sbhh = bhh[i];
	sbhl = bhl[i];
	sblh = blh[i];
	sbll = bll[i];

	//calc
	_qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll);
	_qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],schh,schl,sclh,scll);

	//store
	tchh[4*tid] = szhh;
	tchl[4*tid] = szhl;
	tclh[4*tid] = szlh;
	tcll[4*tid] = szll;
}
#pragma omp critical
for(i=1;i<4;i++){
	//calc
	_qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+i],tchl[4*tid+i],tclh[4*tid+i],tcll[4*tid+i],tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid]);
	//store
	tchh[4*tid] = schh;
	tchl[4*tid] = schl;
	tclh[4*tid] = sclh;
	tcll[4*tid] = scll;
}
#pragma omp critical
{
	//calc
	_qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],chh[0],chl[0],clh[0],cll[0]);
	//store
	chh[0] = schh;
	chl[0] = schl;
	clh[0] = sclh;
	cll[0] = scll;
}
}
break;
case 1:
#pragma omp parallel num_threads(omp_threadNum)
{
	int num_threads=omp_get_num_threads();
	int tid = omp_get_thread_num();
	double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll;
	double tchh [num_threads*4];
	double tchl [num_threads*4];
	double tclh [num_threads*4];
	double tcll [num_threads*4];
	for (i = 0; i < num_threads*4; i++) {
		tchh[i]=0;
		tchl[i]=0;
		tclh[i]=0;
		tcll[i]=0;
	}
	__m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll;
	__m256d vchh = _mm256_setzero_pd();
	__m256d vchl = _mm256_setzero_pd();
	__m256d vclh = _mm256_setzero_pd();
	__m256d vcll = _mm256_setzero_pd();
	#pragma omp for
	for (i = 0; i < m * n - p; i += 4){
		//load
		vahh = _mm256_loadu_pd(&ahh[i]);
		vahl = _mm256_loadu_pd(&ahl[i]);
		valh = _mm256_loadu_pd(&alh[i]);
		vall = _mm256_loadu_pd(&all[i]);
		vbhh = _mm256_loadu_pd(&bhh[i]);
		vbhl = _mm256_loadu_pd(&bhl[i]);
		vblh = _mm256_loadu_pd(&blh[i]);
		vbll = _mm256_loadu_pd(&bll[i]);

		//calc
		_qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll);
		_qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll);
	}
	//store
	_mm256_storeu_pd( &tchh[4*tid], vchh );
	_mm256_storeu_pd( &tchl[4*tid], vchl );
	_mm256_storeu_pd( &tclh[4*tid], vclh );
	_mm256_storeu_pd( &tcll[4*tid], vcll );
	#pragma omp for
	for (i = m * n - p; i < m * n; i++) {
		//load
		sahh = ahh[i];
		sahl = ahl[i];
		salh = alh[i];
		sall = all[i];
		sbhh = bhh[i];
		sbhl = bhl[i];
		sblh = blh[i];
		sbll = bll[i];

		//calc
		_qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll);
_qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],schh,schl,sclh,scll); //store tchh[4*tid] = szhh; tchl[4*tid] = szhl; tclh[4*tid] = szlh; tcll[4*tid] = szll; } #pragma omp critical for(i=1;i<4;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+i],tchl[4*tid+i],tclh[4*tid+i],tcll[4*tid+i],tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid]); //store tchh[4*tid] = schh; tchl[4*tid] = schl; tclh[4*tid] = sclh; tcll[4*tid] = scll; } #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; } } } /** * * @fn _qd_dot_qd_self(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int) * @brief You can compute the inner product of quad-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. * @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_dot_qd_self(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all, int m, int n, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0) omp_threadNum = omp_get_max_threads(); int i; int p = m % 4; switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads]; double tchl [num_threads]; double tclh [num_threads]; double tcll [num_threads]; for (i = 0; i < num_threads; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } #pragma omp for for (i = 0; i < m * n; i ++){ //load sahh = ahh[i]; sahl = ahl[i]; salh = alh[i]; sall = all[i]; //calc _qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sahh,sahl,salh,sall); _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],schh,schl,sclh,scll); //store tchh[tid] = szhh; tchl[tid] = szhl; tclh[tid] = szlh; tcll[tid] = szll; } #pragma omp critical { //calc _qd_add_qd(&schh,&schl,&sclh,&scll,tchh[tid],tchl[tid],tclh[tid],tcll[tid],chh[0],chl[0],clh[0],cll[0]); //store chh[0] = schh; chl[0] = schl; clh[0] = sclh; cll[0] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll; double tchh [num_threads*4]; double tchl [num_threads*4]; double tclh [num_threads*4]; double tcll [num_threads*4]; for (i = 0; i < num_threads*4; i++) { tchh[i]=0; tchl[i]=0; tclh[i]=0; tcll[i]=0; } __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll; __m256d vchh = _mm256_setzero_pd(); __m256d vchl = _mm256_setzero_pd(); __m256d vclh = _mm256_setzero_pd(); __m256d vcll = _mm256_setzero_pd(); #pragma omp for for (i = 0; i < m * n; i ++){ //load vahh = _mm256_set_pd(ahh[i],0,0,0); vahl = _mm256_set_pd(ahl[i],0,0,0); valh = _mm256_set_pd(alh[i],0,0,0); vall = _mm256_set_pd(all[i],0,0,0); //calc _qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vahh,vahl,valh,vall); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } //store 
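/* Operands were placed in the top lane by _mm256_set_pd(x,0,0,0), so only
   element 4*tid+3 of the spilled accumulator holds the sum. */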
_mm256_storeu_pd( &tchh[4*tid], vchh );
_mm256_storeu_pd( &tchl[4*tid], vchl );
_mm256_storeu_pd( &tclh[4*tid], vclh );
_mm256_storeu_pd( &tcll[4*tid], vcll );
#pragma omp critical
{
	//calc
	_qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+3],tchl[4*tid+3],tclh[4*tid+3],tcll[4*tid+3],chh[0],chl[0],clh[0],cll[0]);
	//store
	chh[0] = schh;
	chl[0] = schl;
	clh[0] = sclh;
	cll[0] = scll;
}
}
break;
}
break;
case 1:
switch(fma){
case 0:
#pragma omp parallel num_threads(omp_threadNum)
{
	int num_threads=omp_get_num_threads();
	int tid = omp_get_thread_num();
	double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll;
	double tchh [num_threads*4];
	double tchl [num_threads*4];
	double tclh [num_threads*4];
	double tcll [num_threads*4];
	for (i = 0; i < num_threads*4; i++) {
		tchh[i]=0;
		tchl[i]=0;
		tclh[i]=0;
		tcll[i]=0;
	}
	__m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll;
	__m256d vchh = _mm256_setzero_pd();
	__m256d vchl = _mm256_setzero_pd();
	__m256d vclh = _mm256_setzero_pd();
	__m256d vcll = _mm256_setzero_pd();
	#pragma omp for
	for (i = 0; i < m * n - p; i += 4){
		//load
		vahh = _mm256_loadu_pd(&ahh[i]);
		vahl = _mm256_loadu_pd(&ahl[i]);
		valh = _mm256_loadu_pd(&alh[i]);
		vall = _mm256_loadu_pd(&all[i]);

		//calc
		_qd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vahh,vahl,valh,vall);
		_qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll);
	}
	//store
	_mm256_storeu_pd( &tchh[4*tid], vchh );
	_mm256_storeu_pd( &tchl[4*tid], vchl );
	_mm256_storeu_pd( &tclh[4*tid], vclh );
	_mm256_storeu_pd( &tcll[4*tid], vcll );
	#pragma omp for
	for (i = m * n - p; i < m * n; i++) {
		//load
		sahh = ahh[i];
		sahl = ahl[i];
		salh = alh[i];
		sall = all[i];

		//calc
		_qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sahh,sahl,salh,sall);
		_qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],schh,schl,sclh,scll);

		//store
		tchh[4*tid] = szhh;
		tchl[4*tid] = szhl;
		tclh[4*tid] = szlh;
		tcll[4*tid] = szll;
	}
	#pragma omp critical
	for(i=1;i<4;i++){
		//calc
		_qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+i],tchl[4*tid+i],tclh[4*tid+i],tcll[4*tid+i],tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid]);
		//store
		tchh[4*tid] = schh;
		tchl[4*tid] = schl;
		tclh[4*tid] = sclh;
		tcll[4*tid] = scll;
	}
	#pragma omp critical
	{
		//calc
		_qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],chh[0],chl[0],clh[0],cll[0]);
		//store
		chh[0] = schh;
		chl[0] = schl;
		clh[0] = sclh;
		cll[0] = scll;
	}
}
break;
case 1:
#pragma omp parallel num_threads(omp_threadNum)
{
	int num_threads=omp_get_num_threads();
	int tid = omp_get_thread_num();
	double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll,schh,schl,sclh,scll;
	double tchh [num_threads*4];
	double tchl [num_threads*4];
	double tclh [num_threads*4];
	double tcll [num_threads*4];
	for (i = 0; i < num_threads*4; i++) {
		tchh[i]=0;
		tchl[i]=0;
		tclh[i]=0;
		tcll[i]=0;
	}
	__m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vzhh,vzhl,vzlh,vzll;
	__m256d vchh = _mm256_setzero_pd();
	__m256d vchl = _mm256_setzero_pd();
	__m256d vclh = _mm256_setzero_pd();
	__m256d vcll = _mm256_setzero_pd();
	#pragma omp for
	for (i = 0; i < m * n - p; i += 4){
		//load
		vahh = _mm256_loadu_pd(&ahh[i]);
		vahl = _mm256_loadu_pd(&ahl[i]);
		valh = _mm256_loadu_pd(&alh[i]);
		vall = _mm256_loadu_pd(&all[i]);

		//calc
		_qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vahh,vahl,valh,vall);
		_qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll);
	}
	//store
	_mm256_storeu_pd( &tchh[4*tid], vchh );
	_mm256_storeu_pd( &tchl[4*tid], vchl
);
	_mm256_storeu_pd( &tclh[4*tid], vclh );
	_mm256_storeu_pd( &tcll[4*tid], vcll );
	#pragma omp for
	for (i = m * n - p; i < m * n; i++) {
		//load
		sahh = ahh[i];
		sahl = ahl[i];
		salh = alh[i];
		sall = all[i];

		//calc
		_qd_mul_qd(&schh,&schl,&sclh,&scll,sahh,sahl,salh,sall,sahh,sahl,salh,sall);
		_qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],schh,schl,sclh,scll);

		//store
		tchh[4*tid] = szhh;
		tchl[4*tid] = szhl;
		tclh[4*tid] = szlh;
		tcll[4*tid] = szll;
	}
	#pragma omp critical
	for(i=1;i<4;i++){
		//calc
		_qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid+i],tchl[4*tid+i],tclh[4*tid+i],tcll[4*tid+i],tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid]);
		//store
		tchh[4*tid] = schh;
		tchl[4*tid] = schl;
		tclh[4*tid] = sclh;
		tcll[4*tid] = scll;
	}
	#pragma omp critical
	{
		//calc
		_qd_add_qd(&schh,&schl,&sclh,&scll,tchh[4*tid],tchl[4*tid],tclh[4*tid],tcll[4*tid],chh[0],chl[0],clh[0],cll[0]);
		//store
		chh[0] = schh;
		chl[0] = schl;
		clh[0] = sclh;
		cll[0] = scll;
	}
}
break;
}
}
}

/**
*
* @fn _qd_mv_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int)
* @brief You can compute the matrix-vector multiplication of quad-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off.
* @param chh,chl,clh,cll output (quad-double precision)
* @param ahh,ahl,alh,all input (quad-double precision)
* @param bhh,bhl,blh,bll input (quad-double precision)
* @param m,n,l size
* @param omp_threadNum the number of threads
* @param avx ON/OFF of AVX2
* @param fma ON/OFF of FMA
*
*/
void _qd_mv_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma)
{
	if(omp_threadNum == 0){
		omp_threadNum = omp_get_max_threads();
	}
	switch(avx){
		case 0:
			switch(fma){
				case 0:
					#pragma omp parallel num_threads(omp_threadNum)
					{
						int i,k;
						int num_threads=omp_get_num_threads();
						int tid = omp_get_thread_num();
						double tchh [num_threads*m];
						double tchl [num_threads*m];
						double tclh [num_threads*m];
						double tcll [num_threads*m];
						for (k = 0; k < num_threads*m; k++) {
							tchh[k]=0;
							tchl[k]=0;
							tclh[k]=0;
							tcll[k]=0;
						}
						double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll;
						#pragma omp for
						for (k = 0; k < l; k++) {
							sbhh = bhh[k];
							sbhl = bhl[k];
							sblh = blh[k];
							sbll = bll[k];
							for (i = 0; i < m; i++) {
								//load
								sahh = ahh[m*k+i];
								sahl = ahl[m*k+i];
								salh = alh[m*k+i];
								sall = all[m*k+i];

								//calc
								_qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll);
								_qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]);

								//store
								tchh[m*tid+i]=schh;
								tchl[m*tid+i]=schl;
								tclh[m*tid+i]=sclh;
								tcll[m*tid+i]=scll;
							}
						}
						#pragma omp critical
						for(i=0;i<m;i++){
							//calc
							_qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]);
							//store
							chh[i]=schh;
							chl[i]=schl;
							clh[i]=sclh;
							cll[i]=scll;
						}
					}
					break;
				case 1:
					#pragma omp parallel num_threads(omp_threadNum)
					{
						int k,i;
						int num_threads=omp_get_num_threads();
						int tid = omp_get_thread_num();
						double vte1[4]={0,0,0,0};
						double vte2[4]={0,0,0,0};
						double vte3[4]={0,0,0,0};
						double vte4[4]={0,0,0,0};
						double tchh [num_threads*m];
						double tchl [num_threads*m];
						double tclh [num_threads*m];
						double tcll [num_threads*m];
						for (k = 0; k < num_threads*m; k++) {
							tchh[k]=0;
							tchl[k]=0;
							tclh[k]=0;
							tcll[k]=0;
						}
						double schh,schl,sclh,scll;
						__m256d
vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[k],0,0,0); vbhl = _mm256_set_pd(bhl[k],0,0,0); vblh = _mm256_set_pd(blh[k],0,0,0); vbll = _mm256_set_pd(bll[k],0,0,0); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vzhh = _mm256_set_pd(tchh[m*tid+i],0,0,0); vzhl = _mm256_set_pd(tchl[m*tid+i],0,0,0); vzlh = _mm256_set_pd(tclh[m*tid+i],0,0,0); vzll = _mm256_set_pd(tcll[m*tid+i],0,0,0); //calc _qd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &vte1[0], vchh ); _mm256_storeu_pd( &vte2[0], vchl ); _mm256_storeu_pd( &vte3[0], vclh ); _mm256_storeu_pd( &vte4[0], vcll ); tchh[m*tid+i]=vte1[3]; tchl[m*tid+i]=vte2[3]; tclh[m*tid+i]=vte3[3]; tcll[m*tid+i]=vte4[3]; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); vblh = _mm256_broadcast_sd(&blh[k]); vbll = _mm256_broadcast_sd(&bll[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _qd_mul_qd_avx2(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; int 
num_threads=omp_get_num_threads(); int tid = omp_get_thread_num(); double tchh [num_threads*m]; double tchl [num_threads*m]; double tclh [num_threads*m]; double tcll [num_threads*m]; for (k = 0; k < num_threads*m; k++) { tchh[k]=0; tchl[k]=0; tclh[k]=0; tcll[k]=0; } double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for schedule(static) for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[k]); vbhl = _mm256_broadcast_sd(&bhl[k]); vblh = _mm256_broadcast_sd(&blh[k]); vbll = _mm256_broadcast_sd(&bll[k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vzhh = _mm256_loadu_pd(&tchh[m*tid+i]); vzhl = _mm256_loadu_pd(&tchl[m*tid+i]); vzlh = _mm256_loadu_pd(&tclh[m*tid+i]); vzll = _mm256_loadu_pd(&tcll[m*tid+i]); //calc _qd_mul_qd_fma(&vchh,&vchl,&vclh,&vcll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &tchh[m*tid+i], vchh ); _mm256_storeu_pd( &tchl[m*tid+i], vchl ); _mm256_storeu_pd( &tclh[m*tid+i], vclh ); _mm256_storeu_pd( &tcll[m*tid+i], vcll ); } sbhh = bhh[k]; sbhl = bhl[k]; sblh = blh[k]; sbll = bll[k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&schh,&schl,&sclh,&scll,szhh,szhl,szlh,szll,tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store tchh[m*tid+i]=schh; tchl[m*tid+i]=schl; tclh[m*tid+i]=sclh; tcll[m*tid+i]=scll; } } #pragma omp critical for(i=0;i<m;i++){ //calc _qd_add_qd(&schh,&schl,&sclh,&scll,chh[i],chl[i],clh[i],cll[i],tchh[m*tid+i],tchl[m*tid+i],tclh[m*tid+i],tcll[m*tid+i]); //store chh[i]=schh; chl[i]=schl; clh[i]=sclh; cll[i]=scll; } } break; } break; } } /** * * @fn _qd_tmv_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the transposed matrix-vector multiplicaion of quad-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
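*
* A is stored column-major as in the loops below (element (i,k) at a[m*k+i]),
* giving c[k] = sum_i A(i,k) * b[i] for k = 0..l-1. Sketch (buffer names
* illustrative):
* @code
* _qd_tmv_qd(chh,chl,clh,cll, ahh,ahl,alh,all, bhh,bhl,blh,bll,
*            m, 1, l, 0, 1, 1); // c = A^T * b
* @endcode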
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_tmv_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double schh,schl,sclh,scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; #pragma omp for for (k = 0; k < l; k++) { schh=0; schl=0; sclh=0; scll=0; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,schh,schl,sclh,scll); //store schh = szhh; schl = szhl; sclh = szlh; scll = szll; } chh[k] = schh; chl[k] = schl; clh[k] = sclh; cll[k] = scll; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int i,k; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m; i++) { //load vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vbhh = _mm256_set_pd(bhh[i],0,0,0); vbhl = _mm256_set_pd(bhl[i],0,0,0); vblh = _mm256_set_pd(blh[i],0,0,0); vbll = _mm256_set_pd(bll[i],0,0,0); //calc _qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); chh[k] = tchh[3]; chl[k] = tchl[3]; clh[k] = tclh[3]; cll[k] = tcll[3]; } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _qd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( 
&tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int p = m%4; int k,i; double tchh[4]={0,0,0,0}; double tchl[4]={0,0,0,0}; double tclh[4]={0,0,0,0}; double tcll[4]={0,0,0,0}; double schh,schl,sclh,scll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,szhh,szhl,szlh,szll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (k = 0; k < l; k++) { vchh = _mm256_setzero_pd(); vchl = _mm256_setzero_pd(); vclh = _mm256_setzero_pd(); vcll = _mm256_setzero_pd(); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vbhh = _mm256_loadu_pd(&bhh[i]); vbhl = _mm256_loadu_pd(&bhl[i]); vblh = _mm256_loadu_pd(&blh[i]); vbll = _mm256_loadu_pd(&bll[i]); //calc _qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); } _mm256_storeu_pd( &tchh[0], vchh ); _mm256_storeu_pd( &tchl[0], vchl ); _mm256_storeu_pd( &tclh[0], vclh ); _mm256_storeu_pd( &tcll[0], vcll ); for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; sbhh = bhh[i]; sbhl = bhl[i]; sblh = blh[i]; sbll = bll[i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } for(i=1;i<4;i++){ //calc _qd_add_qd(&szhh,&szhl,&szlh,&szll,tchh[i],tchl[i],tclh[i],tcll[i],tchh[0],tchl[0],tclh[0],tcll[0]); //store tchh[0] = szhh; tchl[0] = szhl; tclh[0] = szlh; tcll[0] = szll; } chh[k] = tchh[0]; chl[k] = tchl[0]; clh[k] = tclh[0]; cll[k] = tcll[0]; } } break; } break; } } /** * * @fn _qd_mm_qd(double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,double*,int,int,int,int,int,int) * @brief You can compute the matrix-matrix multiplicaion of double-double and quad-double numbers. You can choose the number of OpenMP threads and FMA, and AVX2 on / off. 
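*
* Matrices are column-major: C is m x n (element (i,j) at c[m*j+i]), A is
* m x l, B is l x n (element (k,j) at b[l*j+k]). Products are accumulated
* into the existing contents of C, so zero C first. Sketch (illustrative):
* @code
* for (i = 0; i < m*n; i++) chh[i] = chl[i] = clh[i] = cll[i] = 0.0;
* _qd_mm_qd(chh,chl,clh,cll, ahh,ahl,alh,all, bhh,bhl,blh,bll,
*           m, n, l, 0, 1, 1); // C += A * B in quad-double
* @endcode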
* @param chh,chl,clh,cll output (quad-double precision) * @param ahh,ahl,alh,all input (quad-double precision) * @param bhh,bhl,blh,bll input (quad-double precision) * @param m,n size * @param omp_threadNum the number of threads * @param avx ON/OFF of AVX2 * */ void _qd_mm_qd(double* chh,double* chl,double* clh,double* cll,double *ahh,double *ahl,double *alh,double *all,double *bhh,double *bhl,double *blh,double *bll, int m, int n, int l, int omp_threadNum, int avx,int fma) { if(omp_threadNum == 0){ omp_threadNum = omp_get_max_threads(); } switch(avx){ case 0: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int i,k,j; double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = 0; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; double szhh[4]={0,0,0,0}; double szhl[4]={0,0,0,0}; double szlh[4]={0,0,0,0}; double szll[4]={0,0,0,0}; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_set_pd(bhh[l*j+k],0,0,0); vbhl = _mm256_set_pd(bhl[l*j+k],0,0,0); vblh = _mm256_set_pd(blh[l*j+k],0,0,0); vbll = _mm256_set_pd(bll[l*j+k],0,0,0); for (i = 0; i < m; i++) { vahh = _mm256_set_pd(ahh[m*k+i],0,0,0); vahl = _mm256_set_pd(ahl[m*k+i],0,0,0); valh = _mm256_set_pd(alh[m*k+i],0,0,0); vall = _mm256_set_pd(all[m*k+i],0,0,0); vchh = _mm256_set_pd(chh[m*j+i],0,0,0); vchl = _mm256_set_pd(chl[m*j+i],0,0,0); vclh = _mm256_set_pd(clh[m*j+i],0,0,0); vcll = _mm256_set_pd(cll[m*j+i],0,0,0); //calc _qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); _mm256_storeu_pd( &szhh[0], vchh ); _mm256_storeu_pd( &szhl[0], vchl ); _mm256_storeu_pd( &szlh[0], vclh ); _mm256_storeu_pd( &szll[0], vcll ); chh[m*j+i] = szhh[3]; chl[m*j+i] = szhl[3]; clh[m*j+i] = szlh[3]; cll[m*j+i] = szll[3]; } } } } break; } break; case 1: switch(fma){ case 0: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); vblh = _mm256_broadcast_sd(&blh[l*j+k]); vbll = _mm256_broadcast_sd(&bll[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _qd_mul_qd_avx2(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); 
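/* C(i..i+3, j) += B(k, j) * A(i..i+3, k), evaluated in quad-double
   across four AVX2 lanes before being written back below. */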
//store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; case 1: #pragma omp parallel num_threads(omp_threadNum) { int j,k,i; int p=m%4; double sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll,schh,schl,sclh,scll,szhh,szhl,szlh,szll; __m256d vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll,vchh,vchl,vclh,vcll,vzhh,vzhl,vzlh,vzll; #pragma omp for for (j = 0; j < n; j++) { for (k = 0; k < l; k++) { vbhh = _mm256_broadcast_sd(&bhh[l*j+k]); vbhl = _mm256_broadcast_sd(&bhl[l*j+k]); vblh = _mm256_broadcast_sd(&blh[l*j+k]); vbll = _mm256_broadcast_sd(&bll[l*j+k]); for (i = 0; i < m-p; i+=4) { //load vahh = _mm256_loadu_pd(&ahh[m*k+i]); vahl = _mm256_loadu_pd(&ahl[m*k+i]); valh = _mm256_loadu_pd(&alh[m*k+i]); vall = _mm256_loadu_pd(&all[m*k+i]); vchh = _mm256_loadu_pd(&chh[m*j+i]); vchl = _mm256_loadu_pd(&chl[m*j+i]); vclh = _mm256_loadu_pd(&clh[m*j+i]); vcll = _mm256_loadu_pd(&cll[m*j+i]); //calc _qd_mul_qd_fma(&vzhh,&vzhl,&vzlh,&vzll,vahh,vahl,valh,vall,vbhh,vbhl,vblh,vbll); _qd_add_qd_avx2(&vchh,&vchl,&vclh,&vcll,vzhh,vzhl,vzlh,vzll,vchh,vchl,vclh,vcll); //store _mm256_storeu_pd( &chh[m*j+i], vchh ); _mm256_storeu_pd( &chl[m*j+i], vchl ); _mm256_storeu_pd( &clh[m*j+i], vclh ); _mm256_storeu_pd( &cll[m*j+i], vcll ); } sbhh = bhh[l*j+k]; sbhl = bhl[l*j+k]; sblh = blh[l*j+k]; sbll = bll[l*j+k]; for (i = m-p; i < m; i++) { //load sahh = ahh[m*k+i]; sahl = ahl[m*k+i]; salh = alh[m*k+i]; sall = all[m*k+i]; //calc _qd_mul_qd(&szhh,&szhl,&szlh,&szll,sahh,sahl,salh,sall,sbhh,sbhl,sblh,sbll); _qd_add_qd(&szhh,&szhl,&szlh,&szll,szhh,szhl,szlh,szll,chh[m*j+i],chl[m*j+i],clh[m*j+i],cll[m*j+i]); //store chh[m*j+i] = szhh; chl[m*j+i] = szhl; clh[m*j+i] = szlh; cll[m*j+i] = szll; } } } } break; } break; } } //----------------------------------------------------------------- // Definition of base operation //----------------------------------------------------------------- //----------------------------------------------------------------- // Serial //----------------------------------------------------------------- /** * * @fn _d_add_dd(double*,double*,double,double,double) * @brief The function compute the addition of double and double-double number with serial computing. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * */ void _d_add_dd(double* chi,double* clo,double ahi,double bhi,double blo) { double s,v,eh,el,chi_n,clo_n; //threeSum2(ahi, bhi, blo); //twosum(ahi,bhi) s = ahi + bhi; v = s - ahi; eh = (ahi - (s - v)) + (bhi - v); //twosum(s,blo) chi_n = s + blo; v = chi_n - s; el = (s - (chi_n - v)) + (blo - v); clo_n = eh + el; *chi = chi_n; *clo = clo_n; } /** * * @fn _d_mul_dd(double*,double*,double,double,double) * @brief The function compute the multiplication of double and double-double number with serial computing. 
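*
* The product is formed with Dekker splitting: the constant 134217729
* (= 2^27 + 1) splits each 53-bit double into high/low halves so that
* ahi * bhi is recovered exactly as the leading term plus an error term.
* Usage sketch (operand values illustrative):
* @code
* double chi, clo;
* _d_mul_dd(&chi, &clo, 3.0, bhi, blo); // (chi,clo) = 3.0 * (bhi,blo)
* @endcode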
* @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (double-double precision) * */ void _d_mul_dd(double* chi,double* clo,double ahi,double bhi,double blo) { double t,t1,ah,al,bh,bl,p1,p2,chi_n,clo_n; //twoprod(ahi,bhi) //split(ahi) t = 134217729 * ahi; ah = t - (t - ahi); al = ahi - ah; //split(bhi) t1 = 134217729 * bhi; bh = t1 - (t1 - bhi); bl = bhi - bh; p1 = ahi * bhi; p2 = ((ah * bh - p1) + ah * bl + al * bh) + al * bl; p2 = p2 + ahi * blo; chi_n = p1 + p2; clo_n = p2 - (chi_n - p1); *chi = chi_n; *clo = clo_n; } /** * * @fn _d_add_qd(double*,double*,double*,double*,double,double,double,double,double) * @brief The function compute the addition of double and quad-double number with serial computing. * @param chh,chl,clh,cll output (quad-double precision) * @param xhh input (double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _d_add_qd(double* chh,double* chl,double* clh,double* cll,double xhh,double yhh,double yhl,double ylh,double yll) { double s1,s2,s3,s4,s5,e,v,s,ss,t1,t2,t3,t4,zhh,zhl,zlh,zll; //twoSum(x,y.hh); s1 = xhh + yhh; v = s1 - xhh; e = (xhh - (s1 - v)) + (yhh - v); //twoSum(e,y.hl); s2 = e + yhl; v = s2 - e; e = (e - (s2 - v)) + (yhl - v); //twoSum(e,y.lh);; s3 = e + ylh; v = s3 - e; e = (e - (s3 - v)) + (ylh - v); //two_sum(e, y.ll) s4 = e + yll; v = s4 - e; s5 = (e - (s4 - v)) + (yll - v); //renormalize(s1, s2, s3, s4, s5) s = s4 + s5; t4 = s5 - (s - s4); ss = s3 + s; t3 = s - (ss - s3); s = s2 + ss; t2 = ss - (s - s2); zhh = s1 + s; t1 = s - (zhh - s1); s = t3 + t4; t3 = t4 - (s - t3); ss = t2 + s; t2 = s - (ss - t2); zhl = t1 + ss; t1 = ss - (zhl - t1); s = t2 + t3; t2 = t3 - (s - t2); zlh = t1 + s; t1 = s - (zlh - t1); zll = t1 + t2; *chh = zhh; *chl = zhl; *clh = zlh; *cll = zll; } /** * * @fn _d_mul_qd(double*,double*,double*,double*,double,double,double,double,double) * @brief The function compute the multiplication of double and quad-double number with serial computing. 
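 *
 * Internally x is multiplied against each component of y with twoProd, and
 * the five partial results are renormalized back into four components.
 * A minimal usage sketch (hypothetical quad-double y in yhh..yll):
 * @code
 *   double chh, chl, clh, cll;
 *   _d_mul_qd(&chh, &chl, &clh, &cll, 2.0, yhh, yhl, ylh, yll);
 * @endcode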
* @param chh,chl,clh,cll output (quad-double precision) * @param xhh input (double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _d_mul_qd(double* chh,double* chl,double* clh,double* cll,double xhh,double yhh,double yhl,double ylh,double yll) { double p1,p2,p3,p4,p5,s,ss,ah,al,bh,bl,e1,e2,e3,e4,v,sh,eh,t1,t2,t3,t4,zhh,zhl,zlh,zll; //mul //two_prod(xhh, yhh); p1 = xhh * yhh; //[ah, al] = split(a); ah = 134217729 * xhh; ah = ah - (ah - xhh); al = xhh - ah; //[bh, bl] = split(b); bh = 134217729 * yhh; bh = bh - (bh - yhh); bl = yhh - bh; e1 = ((ah * bh - p1) + ah * bl + al * bh) + al * bl; //two_prod(xhh, yhh); p2 = xhh * yhl; //[bh, bl] = split(b); bh = 134217729 * yhl; bh = bh - (bh - yhl); bl = yhl - bh; e2 = ((ah * bh - p2) + ah * bl + al * bh) + al * bl; //two_prod(xhh, yhh); p3 = xhh * ylh; //[bh, bl] = split(b); bh = 134217729 * ylh; bh = bh - (bh - ylh); bl = ylh - bh; e3 = ((ah * bh - p3) + ah * bl + al * bh) + al * bl; p4 = xhh * yll; //twosum(p2, e1) s = p2 + e1; v = s - p2; e1 = (p2 - (s - v)) + (e1 - v); p2 = s; //[p3,e2,e1]=three_sum(p3, e2, e1); //two_Sum(p3, e2); s = p3 + e2; v = s - p3; eh = (p3 - (s - v)) + (e2 - v); //two_Sum(s, e1); p3 = s + e1; v = p3 - s; s = (s - (p3 - v)) + (e1 - v); //two_Sum(eh, s); e2 = eh + s; v = e2 - eh; e1 = (eh - (e2 - v)) + (s - v); //[p4,e2]=threesum2(p4,e3,e2) //twosum(p4,e3) s = p4 + e3; v = s - p4; eh = (p4 - (s - v)) + (e3 - v); //twosum(s,e2) p4 = s + e2; v = p4 - s; s = (s - (p4 - v)) + (e2 - v); e2 = eh + s; p5 = e1 + e2; //renormalize(from p1 to p5); s = p4 + p5; t4 = p5 - (s - p4); ss = p3 + s; t3 = s - (ss - p3); s = p2 + ss; t2 = ss - (s - p2); zhh = p1 + s; t1 = s - (zhh - p1); s = t3 + t4; t3 = t4 - (s - t3); ss = t2 + s; t2 = s - (ss - t2); zhl = t1 + ss; t1 = ss - (zhl - t1); s = t2 + t3; t2 = t3 - (s -t2); zlh = t1 + s; t1 = s - (zlh - t1); zll = t1 + t2; *chh = zhh; *chl = zhl; *clh = zlh; *cll = zll; } /** * * @fn _dd_add_dd(double*,double*,double,double,double,double) * @brief The function compute the addition of double-double and double-double number with serial computing. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (quad-double precision) * */ void _dd_add_dd(double* chi,double* clo,double ahi,double alo,double bhi,double blo) { double sl,s,v,eh,el,chi_n,clo_n; s = ahi + bhi; v = s - ahi; eh = (ahi - (s - v)) + (bhi - v); sl = alo + blo; eh = eh + sl; chi_n = s + eh; clo_n = eh - (chi_n - s); *chi = chi_n; *clo = clo_n; } /** * * @fn _dd_add_dd_ieee(double*,double*,double,double,double,double) * @brief The function compute the addition of double-double and double-double number with serial computing. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (quad-double precision) * */ void _dd_add_dd_ieee(double* chi,double* clo,double ahi,double alo,double bhi,double blo) { /* Compute double-double = double-double + double-double */ double s1,s2,bv,t1,t2,chi_n,clo_n; /* Add two high-order words. */ s1 = ahi + bhi; bv = s1 - ahi; s2 = ((bhi - bv) + (ahi - (s1 - bv))); /* Add two low-order words. 
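	   Each (sum, error) pair computed this way satisfies
	   sum + error == a + b exactly (Knuth's branch-free twoSum).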
*/ t1 = alo + blo; bv = t1 - alo; t2 = ((blo - bv) + (alo - (t1 - bv))); s2 += t1; /* Renormalize (s1, s2) to (t1, s2) */ t1 = s1 + s2; s2 = s2 - (t1 - s1); t2 += s2; /* Renormalize (t1, t2) */ chi_n = t1 + t2; clo_n = t2 - (chi_n - t1); *chi = chi_n; *clo = clo_n; } /** * * @fn _dd_mul_dd(double*,double*,double,double,double,double) * @brief The function compute the multiplication of double-double and double-double number with serial computing. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (quad-double precision) * */ void _dd_mul_dd(double* chi,double* clo,double ahi,double alo,double bhi,double blo) { double t,t1,ah,al,bh,bl,p1,p2,chi_n,clo_n; t = 134217729 * ahi; ah = t - (t - ahi); al = ahi - ah; t1 = 134217729 * bhi; bh = t1 - (t1 - bhi); bl = bhi - bh; p1 = ahi * bhi; p2 = ((ah * bh - p1) + ah * bl + al * bh) + al * bl; p2 = p2 + ahi * blo; p2 = p2 + alo * bhi; chi_n = p1 + p2; clo_n = p2 - (chi_n - p1); *chi = chi_n; *clo = clo_n; } /** * * @fn _dd_add_qd(double*,double*,double*,double*,double,double,double,double,double,double) * @brief The function compute the addition of double-double and quad-double number with serial computing. * @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl input (double-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _dd_add_qd(double* chh,double* chl,double* clh,double* cll,double xhh,double xhl,double yhh,double yhl,double ylh,double yll) { double s1,s2,s3,s4,s5,e1,e2,e3,e4,e5,v,u,s,ss,t1,t2,t3,t4,zhh,zhl,zlh,zll; //addition //twoSum(x.hh, y.hh); s1 = xhh + yhh; v = s1 - xhh; e1 = (xhh - (s1 - v)) + (yhh - v); //twoSum(x.hl, y.hl); t1 = xhl + yhl; v = t1 - xhl; e2 = (xhl - (t1 - v)) + (yhl - v); //twoSum(t1, e1); s2 = t1 + e1; v = s2 - t1; u = (t1 - (s2 - v)) + (e1 - v); //twoSum(e2, y.lh); t2 = e2 + ylh; v = t2 - e2; e3 = (e2 - (t2 - v)) + (ylh - v); //twoSum(u, t2); s3 = u + t2; v = s3 - u; u = (u - (s3 - v)) + (t2 - v); //twoSum(u, e3); t3 = u + e3; v = t3 - u; e4 = (u - (t3 - v)) + (e3 - v); //twoSum(t3, y.ll); s4 = t3 + yll; v = s4 - t3; e5 = (t3 - (s4 - v)) + (yll - v); s5 = e4 + e5; //renormalize(s1, s2, s3, s4, s5) s = s4 + s5; t4 = s5 - (s - s4); ss = s3 + s; t3 = s - (ss - s3); s = s2 + ss; t2 = ss - (s - s2); zhh = s1 + s; t1 = s - (zhh - s1); s = t3 + t4; t3 = t4 - (s - t3); ss = t2 + s; t2 = s - (ss - t2); zhl = t1 + ss; t1 = ss - (zhl - t1); s = t2 + t3; t2 = t3 - (s - t2); zlh = t1 + s; t1 = s - (zlh - t1); zll = t1 + t2; *chh=zhh; *chl=zhl; *clh=zlh; *cll=zll; } /** * * @fn _dd_mul_qd(double*,double*,double*,double*,double,double,double,double,double,double) * @brief The function compute the multiplication of double-double and quad-double number with serial computing. 
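 *
 * The product is expanded into partial products of decreasing magnitude
 * (roughly O(eps^k) relative to the leading term), which are then merged
 * with two_sum/three_sum chains and renormalized into four components.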
* @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl input (double-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _dd_mul_qd(double* chh,double* chl,double* clh,double* cll,double xhh,double xhl,double yhh,double yhl,double ylh,double yll) { double a1h,a1l,a2h,a2l,e5,p1,p2,p3,p4,p5,s,ss,ah,al,bh,bl,e1,e2,e3,e4,v,sh,eh,t1,t2,t3,t4,zhh,zhl,zlh,zll; //two_prod(xhh, yhh); p1 = xhh * yhh; //[ah, al] = split(a); a1h = 134217729 * xhh; a1h = a1h - (a1h - xhh); a1l = xhh - a1h; //[bh, bl] = split(b); bh = 134217729 * yhh; bh = bh - (bh - yhh); bl = yhh - bh; e1 = ((a1h * bh - p1) + a1h * bl + a1l * bh) + a1l * bl; //two_prod(xhl, yhh); p2 = xhl * yhh; //[ah, al] = split(a); a2h = 134217729 * xhl; a2h = a2h - (a2h - xhl); a2l = xhl - a2h; e2 = ((a2h * bh - p2) + a2h * bl + a2l * bh) + a2l * bl; //two_prod(xhh, yhl); p3 = xhh * yhl; //[bh, bl] = split(b); bh = 134217729 * yhl; bh = bh - (bh - yhl); bl = yhl - bh; e3 = ((a1h * bh - p3) + a1h * bl + a1l * bh) + a1l * bl; //two_prod(xhl, yhl); p4 = xhl * yhl; e4 = ((a2h * bh - p4) + a2h * bl + a2l * bh) + a2l * bl; //two_prod(xhh, ylh); p5 = xhh * ylh; //[bh, bl] = split(b); bh = 134217729 * ylh; bh = bh - (bh - ylh); bl = ylh - bh; e5 = ((a1h * bh - p5) + a1h * bl + a1l * bh) + a1l * bl; //[p3,e2,e1]=three_sum(p2, p3, e1); //two_Sum(p3, p3); s = p2 + p3; v = s - p2; eh = (p2 - (s - v)) + (p3 - v); //two_Sum(s, e1); p2 = s + e1; v = p2 - s; s = (s - (p2 - v)) + (e1 - v); //two_Sum(eh, s); p3 = eh + s; v = p3 - eh; e1 = (eh - (p3 - v)) + (s - v); //three_sum(p3, p4, p5); //two_Sum(p3, p4); s = p3 + p4; v = s - p3; eh = (p3 - (s - v)) + (p4 - v); //two_Sum(s, p5); p3 = s + p5; v = p3 - s; s = (s - (p3 - v)) + (p5 - v); //two_Sum(eh, s); p4 = eh + s; v = p4 - eh; p5 = (eh - (p4 - v)) + (s - v); //twosum(e2, e3) s = e2 + e3; v = s - e2; e3 = (e2 - (s - v)) + (e3 - v); e2 = s; //twosum(p3, e2) s = p3 + e2; v = s - p3; e2 = (p3 - (s - v)) + (e2 - v); p3 = s; //twosum(p4, e3) s = p4 + e3; v = s - p4; e3 = (p4 - (s - v)) + (e3 - v); p4 = s; //twosum(p4, e2) s = p4 + e2; v = s - p4; e2 = (p4 - (s - v)) + (e2 - v); p4 = s; p5 = p5 + e2 + e3; ss = xhl * ylh + xhh * yll + e4 + e5; //threesum2(p4,e1,ss) //twosum(p4,e1) s = p4 + e1; v = s - p4; eh = (p4 - (s - v)) + (e1 - v); //twosum(s,ss) p4 = s + ss; v = p4 - s; s = (s - (p4 - v)) + (ss - v); e1 = eh + s; p5 = p5 + e1; //renormalize(from p1 to p5); s = p4 + p5; t4 = p5 - (s - p4); ss = p3 + s; t3 = s - (ss - p3); s = p2 + ss; t2 = ss - (s - p2); zhh = p1 + s; t1 = s - (zhh - p1); s = t3 + t4; t3 = t4 - (s - t3); ss = t2 + s; t2 = s - (ss - t2); zhl = t1 + ss; t1 = ss - (zhl - t1); s = t2 + t3; t2 = t3 - (s -t2); zlh = t1 + s; t1 = s - (zlh - t1); zll = t1 + t2; *chh=zhh; *chl=zhl; *clh=zlh; *cll=zll; } /** * * @fn _qd_add_qd(double*,double*,double*,double*,double,double,double,double,double,double,double,double) * @brief The function compute the addition of quad-double and quad-double number with serial computing. 
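 *
 * A minimal usage sketch (hypothetical quad-double inputs x and y):
 * @code
 *   double chh, chl, clh, cll;
 *   _qd_add_qd(&chh, &chl, &clh, &cll,
 *              xhh, xhl, xlh, xll, yhh, yhl, ylh, yll);
 * @endcode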
* @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl,xlh,xll input (quad-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _qd_add_qd(double* chh,double* chl,double* clh,double* cll,double xhh,double xhl,double xlh,double xll,double yhh,double yhl,double ylh,double yll) { double e1,e2,e3,e4,v,sh,eh,t1,t2,t3,t4,zhh,zhl,zlh,zll; //two_sum(&c0, &e1, a0, b0); 12 zhh = xhh + yhh; v = zhh - xhh; e1 = (xhh - (zhh - v)) + (yhh - v); //two_sum(&c1, &e2, a1, b1); zhl = xhl + yhl; v = zhl - xhl; e2 = (xhl - (zhl - v)) + (yhl - v); //two_sum(&c1, &e1, c1, e1); e3 = zhl + e1; v = e3 - zhl; e1 = (zhl - (e3 - v)) + (e1 - v); zhl=e3; //two_sum(&c2, &e3, a2, b2); 48 zlh = xlh + ylh; v = zlh - xlh; e3 = (xlh - (zlh - v)) + (ylh - v); //three_sum(&c2, &e1, &e2, c2, e2, e1); sh = zlh + e2; v = sh - zlh; eh = (zlh - (sh - v)) + (e2 - v); zlh = sh + e1; v = zlh - sh; sh = (sh - (zlh - v)) + (e1 - v); e1 = eh + sh; v = e1 - eh; e2 = (eh - (e1 - v)) + (sh - v); //two_sum(&c3, &e4, a3, b3); 96 zll = xll + yll; v = zll - xll; e4 = (xll - (zll - v)) + (yll - v); //three_sum2(&c3, &e1, c3, e3, e1);125 sh = zll + e3; v = sh - zll; eh = (zll - (sh - v)) + (e3 - v); zll = sh + e1; v = zll - sh; e1 = eh + (sh - (zll - v)) + (e1 - v); e1 = e1 + e2 + e4; eh=zhh; e2=zhl; e3=zlh; e4=zll; // renormalize(&c0, &c1, &c2, &c3, c0, c1, c2, c3, e1); 129+69 = 198 sh = e4 + e1; t4 = e1 - (sh - e4); v = e3 + sh; t3 = sh - (v - e3); sh = e2 + v; t2 = v - (sh - e2); zhh = eh + sh; t1 = sh - (zhh - eh); sh = t3 + t4; t3 = t4 - (sh - t3); v = t2 + sh; t2 = sh - (v - t2); zhl = t1 + v; t1 = v - (zhl - t1); sh = t2 + t3; t2 = t3 - (sh -t2); zlh = t1 + sh; t1 = sh - (zlh - t1); zll = t1 + t2; *chh = zhh; *chl = zhl; *clh = zlh; *cll = zll; } /** * * @fn _qd_mul_qd(double*,double*,double*,double*,double,double,double,double,double,double,double,double) * @brief The function compute the multiplication of quad-double and quad-double number with serial computing. 
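 *
 * Every two_sum/three_sum step below is built from the same error-free
 * transformation, shown here in scalar form:
 * @code
 *   s = a + b;
 *   v = s - a;
 *   e = (a - (s - v)) + (b - v);   // now s + e == a + b exactly
 * @endcode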
* @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl,xlh,xll input (quad-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _qd_mul_qd(double* chh,double* chl,double* clh,double* cll,double xhh,double xhl,double xlh,double xll,double yhh,double yhl,double ylh,double yll) { double bhh,bhl,blh,bll; double p01,p10,p11,p02,p20,ah00,al00,bh00,bl00,ah01,al01,bh01,bl01,ah10,al10; double bh10,bl10,ah11,al11,bh11,bl11,ah20,al20,bh20,bl20,ah02,al02,bh02,bl02; //two_prod(zhh, al00, xhh, yhh); bhh = xhh * yhh; //[ah, al] = Calc.split(a);3 ah00 = 134217729 * xhh; ah00 = ah00 - (ah00 - xhh); al00 = xhh - ah00; //[bh, bl] = Calc.split(b);9*2+3+10=31 bh00 = 134217729 * yhh; bh00 = bh00 - (bh00 - yhh); bl00 = yhh - bh00; al00 = ((ah00 * bh00 - bhh) + ah00 * bl00 + al00 * bh00) + al00* bl00; //O(eps) terms //two_prod(p01, bl00, xhh, yhl); p01=xhh*yhl; //split ah01=134217729 * xhh; ah01=ah01-(ah01-xhh); al01=xhh-ah01; //split bh01=134217729 * yhl; bh01=bh01-(bh01-yhl); bl01=yhl-bh01; bl00 = ((ah01 * bh01 - p01) + ah01 * bl01 + al01 * bh01) + al01 * bl01; //two_prod(&p10, &al10, xhl, yhh); p10=xhl*yhh; //split ah10=134217729 * xhl; ah10=ah10-(ah10-xhl); al10=xhl-ah10; //split bh10=134217729 * yhh; bh10=bh10-(bh10-yhh); bl10=yhh-bh10; al10 = ((ah10 * bh10 - p10) + ah10 * bl10 + al10 * bh10) + al10 * bl10; //three_sum(zhl, p10, p01, p01, p10, al00); // [ah00, bh00] = Calc.twoSum(p01, p10);12 ah00=p01+p10; al01=ah00-p01; bh00=(p01-(ah00-al01))+(p10-al01); // [zhl, ah00] = Calc.twoSum(sh, al00);24 bhl=ah00+al00; al01=bhl-ah00; ah00=(ah00-(bhl-al01))+(al00-al01); // [p10, p01] = Calc.twoSum(eh, sh);36 p10=bh00+ah00; al01=p10-bh00; p01=(bh00-(p10-al01))+(ah00-al01); //--------------------------------------------------- //O(eps^2) terms //two_prod(&p02, &ah02, xhh, ylh); p02=xhh*ylh; //split ah02=134217729 * xhh; ah02=ah02-(ah02-xhh); al02=xhh-ah02; //split bh02=134217729 * ylh; bh02=bh02-(bh02-ylh); bl02=ylh-bh02; ah02 = ((ah02 * bh02 - p02) + ah02 * bl02 + al02 * bh02) + al02 * bl02; //two_prod(&p11, &bl11, xhl, yhl); p11=xhl*yhl; //split ah11=134217729 * xhl; ah11=ah11-(ah11-xhl); al11=xhl-ah11; //split bh11=134217729 * yhl; bh11=bh11-(bh11-yhl); bl11=yhl-bh11; bl11 = ((ah11 * bh11 - p11) + ah11 * bl11 + al11 * bh11) + al11 * bl11; //two_prod(&p20, &al11, xlh, yhh); p20=xlh*yhh; //split ah20=134217729 * xlh; ah20=ah20-(ah20-xlh); al20=xlh-ah20; //split bh20=134217729 * yhh; bh20=bh20-(bh20-yhh); bl20=yhh-bh20; al11 = (ah20 * bh20 - p20) + ah20 * bl20 + al20 * bh20 + al20 * bl20; //six three sum for p10, bl00, al10, p02, p11, p20 //three_sum(&p10, &bl00, &al10, p10, bl00, al10); // [ah20, bh20] = Calc.twoSum(p10, bl00); ah20=p10+bl00; al20=ah20-p10; bh20=(p10-(ah20-al20))+(bl00-al20); // [p10, ah20] = Calc.twoSum(ah20, al10); p10=ah20+al10; al20=p10-ah20; ah20=(ah20-(p10-al20))+(al10-al20); // [bl00, al10] = Calc.twoSum(bh20, ah20); bl00=bh20+ah20; al20=bl00-bh20; al10=(bh20-(bl00-al20))+(ah20-al20); //three_sum(&p02, &p11, &p20, p02, p11, p20); // [ah11, bh02] = Calc.twoSum(p02, p11); ah11=p02+p11; al02=ah11-p02; bh02=(p02-(ah11-al02))+(p11-al02); // [p02, ah11] = Calc.twoSum(ah11, p20); p02=ah11+p20; al02=p02-ah11; ah11=(ah11-(p02-al02))+(p20-al02); // [p11, p20] = Calc.twoSum(bh02, ah11); p11=bh02+bh02; al02=p11-bh02; p20=(bh02-(p11-al02))+(ah11-al02); // two_sum(zlh, &p10, p02, p10); blh = p02 + p10; bl02 = blh - p02; p10 = (p02 - (blh - bl02)) + (p10 - bl02); //two_sum(&p11, &bl00, p11, bl00); bl20=p11; p11 = bl20 + bl00; bl01 = p11 - bl20; bl00 = (bl20 - 
(p11 - bl01)) + (bl00 - bl01);
	//two_sum(&p10, &p11, p10, p11);
	bl01=p10; p10 = bl01 + p11; bl20 = p10 - bl01; p11 = (bl01 - (p10 - bl20)) + (p11 - bl20);
	//O(eps^4) terms
	al10 = al10 + p20 + bl00 + p11;
	//O(eps^3) terms
	bll = p10 + xhh * yll + xhl * ylh + xlh * yhl + xll * yhh + ah02 + bl11 + al11;
	al00=bhh; bl00=bhl; ah02=blh; al11=bll;
	//renormalize(&c0[0], &c1[0], &c2[0], &c3[0], al00,bl00,ah02,al11, al10);
	bl02 = al11 + al10; bl01 = al10 - (bl02 - al11);
	bh02 = ah02 + bl02; bh01 = bl02 - (bh02 - ah02);
	bl02 = bl00 + bh02; al01 = bh02 - (bl02 - bl00);
	bhh = al00 + bl02; ah01 = bl02 - (bhh - al00);
	bl02 = bh01 + bl01; bh01 = bl01 - (bl02 - bh01);
	bh02 = al01 + bl02; al01 = bl02 - (bh02 - al01);
	bhl = ah01 + bh02; ah01 = bh02 - (bhl - ah01);
	bl02 = al01 + bh01; al01 = bh01 - (bl02 - al01);
	blh = ah01 + bl02; ah01 = bl02 - (blh - ah01);
	bll = ah01 + al01;
	*chh = bhh;
	*chl = bhl;
	*clh = blh;
	*cll = bll;
}

//-----------------------------------------------------------------
// FMA
//-----------------------------------------------------------------
/**
 *
 * @fn _d_mul_dd_fma(__m256d*,__m256d*,__m256d,__m256d,__m256d)
 * @brief Computes the multiplication of a double and a double-double number with FMA.
 * @param vchi,vclo output (double-double precision)
 * @param vahi input (double precision)
 * @param vbhi,vblo input (double-double precision)
 *
 */
void _d_mul_dd_fma(__m256d* vchi,__m256d* vclo,__m256d vahi,__m256d vbhi,__m256d vblo)
{
	__m256d vp1,vp2,vchi_n,vclo_n;

	//twoprod_fma(ahi, bhi): fmsub yields the rounding error of vp1 directly
	vp1 = _mm256_mul_pd(vahi,vbhi);
	vp2 = _mm256_fmsub_pd(vahi,vbhi,vp1);
	vp2 = _mm256_fmadd_pd(vahi, vblo, vp2);
	vchi_n = _mm256_add_pd(vp1, vp2);
	vclo_n = _mm256_sub_pd(vp2, _mm256_sub_pd(vchi_n, vp1));
	*vchi = vchi_n;
	*vclo = vclo_n;
}

/**
 *
 * @fn _d_mul_qd_fma(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d)
 * @brief Computes the multiplication of a double and a quad-double number with FMA.
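 *
 * With FMA the Dekker splitting is unnecessary: the rounding error of each
 * partial product is obtained directly, e.g.
 * @code
 *   vp1 = _mm256_mul_pd(vxhh, vyhh);
 *   ve1 = _mm256_fmsub_pd(vxhh, vyhh, vp1);  // exact error of the product
 * @endcode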
* @param vchh,vchl,vclh,vcll output (quad-double precision) * @param vxhh input (double precision) * @param vyhh,vyhl,vylh,vyll input (quad-double precision) * */ void _d_mul_qd_fma(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d vsh,ve1,ve2,ve3,ve4,veh,vs1,vs2,vs3,vs4,vs5,vp1,vp2,vp3,vp4,vp5,ve,vv,vs,vss,vt1,vt2,vt3,vt4,vzhh,vzhl,vzlh,vzll; //mul //two_prod(a, b.hh); vp1 = _mm256_mul_pd(vxhh, vyhh); ve1 = _mm256_fmsub_pd(vxhh,vyhh,vp1); //two_prod(a, b.hl); vp2 = _mm256_mul_pd(vxhh,vyhl); ve2 = _mm256_fmsub_pd(vxhh,vyhl,vp2); //two_prod(a, b.lh); vp3 = _mm256_mul_pd(vxhh,vylh); ve3 = _mm256_fmsub_pd(vxhh,vylh,vp3); vp4 = _mm256_mul_pd(vxhh,vyll); //two_Sum(p2, e1); vs = _mm256_add_pd(vp2, ve1); vv =_mm256_sub_pd(vs, vp2); ve1 =_mm256_add_pd(_mm256_sub_pd(vp2 ,_mm256_sub_pd(vs,vv)),_mm256_sub_pd(ve1,vv)); vp2 = vs; //three_sum(p3, e2, e1); //two_Sum(p3, e2); vs = _mm256_add_pd(vp3, ve2); vv =_mm256_sub_pd(vs, vp3); veh =_mm256_add_pd(_mm256_sub_pd(vp3 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve2,vv)); //two_Sum(s, e1); vp3 = _mm256_add_pd(vs, ve1); vv =_mm256_sub_pd(vp3, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp3,vv)), _mm256_sub_pd(ve1,vv)); //two_Sum(eh, s); ve2 = _mm256_add_pd(veh, vs); vv =_mm256_sub_pd(ve2, veh); ve1 =_mm256_add_pd(_mm256_sub_pd(veh ,_mm256_sub_pd(ve2,vv)), _mm256_sub_pd(vs,vv)); //three_sum2(p4, e3, e2); //two_Sum(p4, e3); vs = _mm256_add_pd(vp4, ve3); vv =_mm256_sub_pd(vs, vp4); veh =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve3,vv)); //two_Sum(s, e2); vp4 = _mm256_add_pd(vs, ve2); vv =_mm256_sub_pd(vp4, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp4,vv)), _mm256_sub_pd(ve2,vv)); ve2 = _mm256_add_pd(veh, vs); vp5 = _mm256_add_pd(ve1, ve2); //renormalize(p1, p2, p3, p4, p5); vs = _mm256_add_pd(vp4, vp5); vt4 = _mm256_sub_pd(vp5, _mm256_sub_pd(vs, vp4)); vss = _mm256_add_pd(vp3, vs); vt3 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vp3)); vs = _mm256_add_pd(vp2, vss); vt2 = _mm256_sub_pd(vss, _mm256_sub_pd(vs, vp2)); vzhh = _mm256_add_pd(vp1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzhh, vp1)); vs = _mm256_add_pd(vt3, vt4); vt3 = _mm256_sub_pd(vt4, _mm256_sub_pd(vs, vt3)); vss = _mm256_add_pd(vt2, vs); vt2 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vt2)); vzhl = _mm256_add_pd(vt1, vss); vt1 = _mm256_sub_pd(vss, _mm256_sub_pd(vzhl, vt1)); vs = _mm256_add_pd(vt2, vt3); vt2 = _mm256_sub_pd(vt3, _mm256_sub_pd(vs, vt2)); vzlh = _mm256_add_pd(vt1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzlh, vt1)); vzll = _mm256_add_pd(vt1, vt2); *vchh = vzhh; *vchl = vzhl; *vclh = vzlh; *vcll = vzll; } /** * * @fn _dd_mul_dd_fma(__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the multiplication of double-double and double-double number with FMA. 
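 *
 * A minimal usage sketch (hypothetical arrays a_hi,a_lo,b_hi,b_lo, each
 * holding the high or low words of four double-double numbers):
 * @code
 *   __m256d chi, clo;
 *   _dd_mul_dd_fma(&chi, &clo,
 *                  _mm256_loadu_pd(a_hi), _mm256_loadu_pd(a_lo),
 *                  _mm256_loadu_pd(b_hi), _mm256_loadu_pd(b_lo));
 * @endcode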
* @param vchi,vclo output (double-double precision) * @param vahi input (double precision) * @param vbhi,vblo input (quad-double precision) * */ void _dd_mul_dd_fma(__m256d* vchi,__m256d* vclo,__m256d vahi,__m256d valo,__m256d vbhi,__m256d vblo) { __m256d vp1,vp2,vchi_n,vclo_n; vp1 = _mm256_mul_pd(vahi,vbhi); vp2 = _mm256_fmsub_pd(vahi,vbhi,vp1); vp2 = _mm256_fmadd_pd(vahi , vblo , vp2); vp2 = _mm256_fmadd_pd(valo , vbhi , vp2); vchi_n =_mm256_add_pd(vp1 , vp2); vclo_n = _mm256_sub_pd(vp2, _mm256_sub_pd(vchi_n, vp1)); *vchi = vchi_n; *vclo = vclo_n; } /** * * @fn _dd_mul_qd_fma(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the multiplication of double-double and quad-double number with FMA. * @param vchh,vchl,vclh,vcll output (quad-double precision) * @param vxhh,vxhl input (double-double precision) * @param vyhh,vyhl,vylh,vyll input (quad-double precision) * */ void _dd_mul_qd_fma(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vxhl,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d vsh,ve1,ve2,ve3,ve4,ve5,veh,vs1,vs2,vs3,vs4,vs5,vp1,vp2,vp3,vp4,vp5,ve,vv,vs,vss,vt1,vt2,vt3,vt4,vzhh,vzhl,vzlh,vzll; //mul //two_prod(a.hh, b.hh); vp1 = _mm256_mul_pd(vxhh, vyhh); ve1 = _mm256_fmsub_pd(vxhh,vyhh,vp1); //two_prod(a.hl, b.hh); vp2 = _mm256_mul_pd(vxhl,vyhh); ve2 = _mm256_fmsub_pd(vxhl,vyhh,vp2); //two_prod(a.hh, b.hl); vp3 = _mm256_mul_pd(vxhh,vyhl); ve3 = _mm256_fmsub_pd(vxhh,vyhl,vp3); //two_prod(a.hl, b.hl); vp4 = _mm256_mul_pd(vxhl,vyhl); ve4 = _mm256_fmsub_pd(vxhl,vyhl,vp4); //two_prod(a.hh, b.lh); vp5 = _mm256_mul_pd(vxhh,vylh); ve5 = _mm256_fmsub_pd(vxhh,vylh,vp5); //three_sum(p2, p3, e1); //two_Sum(p2, p3); vs = _mm256_add_pd(vp2, vp3); vv =_mm256_sub_pd(vs, vp2); veh =_mm256_add_pd(_mm256_sub_pd(vp2 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(vp3,vv)); //two_Sum(s, e1); vp2 = _mm256_add_pd(vs, ve1); vv =_mm256_sub_pd(vp2, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp2,vv)), _mm256_sub_pd(ve1,vv)); //two_Sum(eh, s); vp3 = _mm256_add_pd(veh, vs); vv =_mm256_sub_pd(vp3, veh); ve1 =_mm256_add_pd(_mm256_sub_pd(veh ,_mm256_sub_pd(vp3,vv)), _mm256_sub_pd(vs,vv)); //three_sum(p3, p4, p5); //two_Sum(p3, p4); vs = _mm256_add_pd(vp3, vp4); vv =_mm256_sub_pd(vs, vp3); veh =_mm256_add_pd(_mm256_sub_pd(vp3 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(vp4,vv)); //two_Sum(s, p5); vp3 = _mm256_add_pd(vs, vp5); vv =_mm256_sub_pd(vp3, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp3,vv)), _mm256_sub_pd(vp5,vv)); //two_Sum(eh, s); vp4 = _mm256_add_pd(veh, vs); vv =_mm256_sub_pd(vp4, veh); vp5 =_mm256_add_pd(_mm256_sub_pd(veh ,_mm256_sub_pd(vp4,vv)), _mm256_sub_pd(vs,vv)); //two_Sum(e2, e3); vs = _mm256_add_pd(ve2, ve3); vv =_mm256_sub_pd(vs, ve2); ve3 =_mm256_add_pd(_mm256_sub_pd(ve2 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve3,vv)); ve2 = vs; //two_Sum(p3, e2); vs = _mm256_add_pd(vp3, ve2); vv =_mm256_sub_pd(vs, vp3); ve2 =_mm256_add_pd(_mm256_sub_pd(vp3 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve2,vv)); vp3 = vs; //two_Sum(p4, e3); vs = _mm256_add_pd(vp4, ve3); vv =_mm256_sub_pd(vs, vp4); ve3 =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve3,vv)); vp4 = vs; //two_Sum(p4, e2); vs = _mm256_add_pd(vp4, ve2); vv =_mm256_sub_pd(vs, vp4); ve2 =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve2,vv)); vp4 = vs; vp5 = _mm256_add_pd(_mm256_add_pd(vp5, ve2),ve3); vss =_mm256_add_pd(_mm256_fmadd_pd(vxhl,vylh,ve4),_mm256_fmadd_pd(vxhh,vyll,ve5)); 
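	//vss gathers the O(eps^3) terms: the cross products xhl*ylh and xhh*yll
	//are fused with the twoProd errors e4 and e5 by the two fmadds above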
//three_sum2(p4, e1, ss); //two_Sum(p4, e1); vs = _mm256_add_pd(vp4, ve1); vv =_mm256_sub_pd(vs, vp4); veh =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve1,vv)); //two_Sum(s, e2); vp4 = _mm256_add_pd(vs, vss); vv =_mm256_sub_pd(vp4, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp4,vv)), _mm256_sub_pd(vss,vv)); ve1 = _mm256_add_pd(veh, vs); vp5 = _mm256_add_pd(vp5, ve1); //renormalize(p1, p2, p3, p4, p5); vs = _mm256_add_pd(vp4, vp5); vt4 = _mm256_sub_pd(vp5, _mm256_sub_pd(vs, vp4)); vss = _mm256_add_pd(vp3, vs); vt3 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vp3)); vs = _mm256_add_pd(vp2, vss); vt2 = _mm256_sub_pd(vss, _mm256_sub_pd(vs, vp2)); vzhh = _mm256_add_pd(vp1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzhh, vp1)); vs = _mm256_add_pd(vt3, vt4); vt3 = _mm256_sub_pd(vt4, _mm256_sub_pd(vs, vt3)); vss = _mm256_add_pd(vt2, vs); vt2 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vt2)); vzhl = _mm256_add_pd(vt1, vss); vt1 = _mm256_sub_pd(vss, _mm256_sub_pd(vzhl, vt1)); vs = _mm256_add_pd(vt2, vt3); vt2 = _mm256_sub_pd(vt3, _mm256_sub_pd(vs, vt2)); vzlh = _mm256_add_pd(vt1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzlh, vt1)); vzll = _mm256_add_pd(vt1, vt2); *vchh = vzhh; *vchl = vzhl; *vclh = vzlh; *vcll = vzll; } /** * * @fn _qd_mul_qd_fma(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the multiplication of quad-double and quad-double number with FMA. * @param vchh,vchl,vclh,vcll output (quad-double precision) * @param vxhh,vxhl,vxlh,vxll input (quad-double precision) * @param vyhh,vyhl,vylh,vyll input (quad-double precision) * */ void _qd_mul_qd_fma(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vxhl,__m256d vxlh,__m256d vxll,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d zhh,zhl,zlh,zll; __m256d p01,p10,p11,p02,p20,ah00,al00,bh00,bl00,ah01,al01,bh01,bl01,ah10,al10; __m256d bh10,bl10,ah11,al11,bh11,bl11,ah20,al20,bh20,bl20,ah02,al02,bh02,bl02; //mul //two_prod(&c0[0], &al00, a0, b0); zhh = _mm256_mul_pd(vxhh, vyhh); al00 = _mm256_fmsub_pd(vxhh,vyhh,zhh); //O(eps) terms //two_prod(&p01, &bl00, a0, b1); p01 = _mm256_mul_pd(vxhh,vyhl); bl00 = _mm256_fmsub_pd(vxhh,vyhl,p01); //two_prod(&p10, &al10, a1, b0); p10=_mm256_mul_pd(vxhl,vyhh); al10 = _mm256_fmsub_pd(vxhl,vyhh,p10); //three_sum(&c1[0], &p10, &p01, p01, p10, al00); // [ah00, bh00] = Calc.twoSum(p01, p10); ah00=_mm256_add_pd(p01,p10); al01=_mm256_sub_pd(ah00,p01); bh00=_mm256_add_pd(_mm256_sub_pd(p01,_mm256_sub_pd(ah00,al01)),_mm256_sub_pd(p10,al01)); // [zhl, ah00] = Calc.twoSum(sh, al00); zhl=_mm256_add_pd(ah00,al00); al01=_mm256_sub_pd(zhl,ah00); ah00=_mm256_add_pd(_mm256_sub_pd(ah00,_mm256_sub_pd(zhl,al01)),_mm256_sub_pd(al00,al01)); // [p10, p01] = Calc.twoSum(eh, sh); p10=_mm256_add_pd(bh00,ah00); al01=_mm256_sub_pd(p10,bh00); p01=_mm256_add_pd(_mm256_sub_pd(bh00,_mm256_sub_pd(p10,al01)),_mm256_sub_pd(ah00,al01)); //--------------------------------------------------- //O(eps^2) terms //two_prod(&p02, &ah02, a0, b2); p02=_mm256_mul_pd(vxhh,vylh); ah02 = _mm256_fmsub_pd(vxhh,vylh,p02); //two_prod(&p11, &bl11, a1, b1); p11=_mm256_mul_pd(vxhl,vyhl); bl11 = _mm256_fmsub_pd(vxhl,vyhl,p11); //two_prod(&p20, &al11, a2, b0); p20=_mm256_mul_pd(vxlh,vyhh); al11 = _mm256_fmsub_pd(vxlh,vyhh,p20); //six three sum for p10, bl00, al10, p02, p11, p20 //three_sum(&p10, &bl00, &al10, p10, bl00, al10); // [ah20, bh20] = Calc.twoSum(p10, bl00); ah20=_mm256_add_pd(p10,bl00); 
al20=_mm256_sub_pd(ah20,p10); bh20=_mm256_add_pd(_mm256_sub_pd(p10,_mm256_sub_pd(ah20,al20)),_mm256_sub_pd(bl00,al20)); // [p10, ah20] = Calc.twoSum(ah20, al10); p10=_mm256_add_pd(ah20,al10); al20=_mm256_sub_pd(p10,ah20); ah20=_mm256_add_pd(_mm256_sub_pd(ah20,_mm256_sub_pd(p10,al20)),_mm256_sub_pd(al10,al20)); // [bl00, al10] = Calc.twoSum(bh20, ah20); bl00=_mm256_add_pd(bh20,ah20); al20=_mm256_sub_pd(bl00,bh20); al10=_mm256_add_pd(_mm256_sub_pd(bh20,_mm256_sub_pd(bl00,al20)),_mm256_sub_pd(ah20,al20)); //three_sum(&p02, &p11, &p20, p02, p11, p20); // [ah11, bh02] = Calc.twoSum(p02, p11); ah11=_mm256_add_pd(p02,p11); al02=_mm256_sub_pd(ah11,p02); bh02=_mm256_add_pd(_mm256_sub_pd(p02,_mm256_sub_pd(ah11,al02)),_mm256_sub_pd(p11,al02)); // [p02, ah11] = Calc.twoSum(ah11, p20); p02=_mm256_add_pd(ah11,p20); al02=_mm256_sub_pd(p02,ah11); ah11= _mm256_add_pd(_mm256_sub_pd(ah11,_mm256_sub_pd(p02,al02)),_mm256_sub_pd(p20,al02)); // [p11, p20] = Calc.twoSum(bh02, ah11); p11=_mm256_add_pd(bh02,bh02); al02=_mm256_sub_pd(p11,bh02); p20=_mm256_add_pd(_mm256_sub_pd(bh02,_mm256_sub_pd(p11,al02)),_mm256_sub_pd(ah11,al02)); // two_sum(&c2[0], &p10, p02, p10); zlh = _mm256_add_pd(p02 , p10); bl02 = _mm256_sub_pd(zlh , p02); p10 = _mm256_add_pd(_mm256_sub_pd(p02 , _mm256_sub_pd(zlh , bl02)) , _mm256_sub_pd(p10 , bl02)); //two_sum(&p11, &bl00, p11, bl00); bl20=p11; p11 = _mm256_add_pd(bl20 , bl00); bl01 = _mm256_sub_pd(p11 , bl20); bl00 = _mm256_add_pd(_mm256_sub_pd(bl20 , _mm256_sub_pd(p11 , bl01)) , _mm256_sub_pd(bl00 , bl01)); //two_sum(&p10, &p11, p10, p11); bl01=p10; p10 = _mm256_add_pd(bl01 , p11); bl20 = _mm256_sub_pd(p10 , bl01); p11 = _mm256_add_pd(_mm256_sub_pd(bl01 , _mm256_sub_pd(p10 , bl20)) , _mm256_sub_pd(p11 , bl20)); //O(eps^4) terms al10 = _mm256_add_pd(_mm256_add_pd(al10 , p20) , _mm256_add_pd(bl00 , p11)); //no fma //O(eps^3) terms zll=_mm256_add_pd( _mm256_add_pd( _mm256_fmadd_pd(vxhh,vyll, _mm256_fmadd_pd( vxhl,vylh,_mm256_fmadd_pd( vxlh,vyhl,_mm256_fmadd_pd( vxll,vyhh,ah02)))), _mm256_add_pd(bl11,al11)),p10); al00=zhh; bl00=zhl; ah02=zlh; al11=zll; //renormalize(&c0[0], &c1[0], &c2[0], &c3[0], al00,bl00,ah02,al11, al10); bl02 = _mm256_add_pd(al11 , al10); bl01 = _mm256_sub_pd(al10 , _mm256_sub_pd(bl02, al11)); bh02 = _mm256_add_pd(ah02 , bl02); bh01 = _mm256_sub_pd(bl02 , _mm256_sub_pd(bh02 , ah02)); bl02 = _mm256_add_pd(bl00 , bh02); al01 = _mm256_sub_pd(bh02, _mm256_sub_pd(bl02 , bl00)); zhh = _mm256_add_pd(al00 , bl02); ah01 = _mm256_sub_pd(bl02 , _mm256_sub_pd(zhh , al00)); bl02 = _mm256_add_pd(bh01 , bl01); bh01 = _mm256_sub_pd(bl01 ,_mm256_sub_pd(bl02 , bh01)); bh02 = _mm256_add_pd(al01 , bl02); al01 = _mm256_sub_pd(bl02 , _mm256_sub_pd(bh02 ,al01)); zhl = _mm256_add_pd(ah01 , bh02); ah01 = _mm256_sub_pd(bh02 , _mm256_sub_pd(zhl, ah01)); bl02 = _mm256_add_pd(al01 , bh01); al01 = _mm256_sub_pd(bh01 , _mm256_sub_pd(bl02 ,al01)); zlh = _mm256_add_pd(ah01 , bl02); ah01 = _mm256_sub_pd(bl02 , _mm256_sub_pd(zlh , ah01)); zll = _mm256_add_pd(ah01 , al01); *vchh = zhh; *vchl = zhl; *vclh = zlh; *vcll = zll; } //----------------------------------------------------------------- // AVX2 //----------------------------------------------------------------- /** * * @fn _d_add_dd_avx2(__m256d*,__m256d*,__m256d,__m256d,__m256d) * @brief The function compute the addition of double and double-double number with AVX2. 
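 *
 * The AVX2 variants below apply the scalar algorithms to four independent
 * problems at once, one per 64-bit lane. A minimal usage sketch
 * (hypothetical arrays a, b_hi, b_lo of four doubles each):
 * @code
 *   __m256d chi, clo;
 *   _d_add_dd_avx2(&chi, &clo, _mm256_loadu_pd(a),
 *                  _mm256_loadu_pd(b_hi), _mm256_loadu_pd(b_lo));
 * @endcode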
* @param vchi,vclo output (double-double precision)
 * @param vahi input (double precision)
 * @param vbhi,vblo input (double-double precision)
 *
 */
void _d_add_dd_avx2(__m256d* vchi,__m256d* vclo,__m256d vahi,__m256d vbhi,__m256d vblo)
{
	__m256d vs,vv,veh,vel,vchi_n,vclo_n;

	//threeSum2(ahi, bhi, blo);
	//twosum(ahi,bhi)
	vs = _mm256_add_pd(vahi,vbhi);
	vv = _mm256_sub_pd(vs, vahi);
	veh = _mm256_add_pd(_mm256_sub_pd(vahi,_mm256_sub_pd(vs, vv)), _mm256_sub_pd(vbhi,vv));
	//twosum(s,blo)
	vchi_n = _mm256_add_pd(vs, vblo);
	vv = _mm256_sub_pd(vchi_n, vs);
	vel = _mm256_add_pd(_mm256_sub_pd(vs,_mm256_sub_pd(vchi_n, vv)), _mm256_sub_pd(vblo,vv));
	vclo_n = _mm256_add_pd(veh, vel);
	*vchi = vchi_n;
	*vclo = vclo_n;
}

/**
 *
 * @fn _d_mul_dd_avx2(__m256d*,__m256d*,__m256d,__m256d,__m256d)
 * @brief Computes the multiplication of a double and a double-double number with AVX2.
 * @param vchi,vclo output (double-double precision)
 * @param vahi input (double precision)
 * @param vbhi,vblo input (double-double precision)
 *
 */
void _d_mul_dd_avx2(__m256d* vchi,__m256d* vclo,__m256d vahi,__m256d vbhi,__m256d vblo)
{
	__m256d vt,vah,val,vbh,vbl,vt1,vp1,vp2,vchi_n,vclo_n;
	__m256d vcons=_mm256_set_pd(134217729,134217729,134217729,134217729);

	//split(ahi): 134217729 = 2^27 + 1 is Dekker's splitter
	vt = _mm256_mul_pd(vcons, vahi);
	vah = _mm256_sub_pd(vt,_mm256_sub_pd(vt, vahi));
	val = _mm256_sub_pd(vahi, vah);
	//split(bhi)
	vt1 = _mm256_mul_pd(vcons, vbhi);
	vbh = _mm256_sub_pd(vt1,_mm256_sub_pd(vt1, vbhi));
	vbl = _mm256_sub_pd(vbhi,vbh);
	//twoprod(ahi,bhi)
	vp1 = _mm256_mul_pd(vahi,vbhi);
	vp2 = _mm256_add_pd(
		_mm256_add_pd(
		_mm256_add_pd(
		_mm256_sub_pd(
		_mm256_mul_pd(vah,vbh),vp1),
		_mm256_mul_pd(vah,vbl)),
		_mm256_mul_pd(val,vbh)),
		_mm256_mul_pd(val,vbl));
	vp2 = _mm256_add_pd(vp2,_mm256_mul_pd(vahi,vblo));
	vchi_n = _mm256_add_pd(vp1, vp2);
	vclo_n = _mm256_sub_pd(vp2, _mm256_sub_pd(vchi_n, vp1));
	*vchi = vchi_n;
	*vclo = vclo_n;
}

/**
 *
 * @fn _d_add_qd_avx2(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d)
 * @brief Computes the addition of a double and a quad-double number with AVX2.
* @param chh,chl,clh,cll output (quad-double precision) * @param xhh input (double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _d_add_qd_avx2(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d vs1,vs2,vs3,vs4,vs5,ve,vv,vs,vss,vt1,vt2,vt3,vt4,vzhh,vzhl,vzlh,vzll; //twoSum(x,y.hh); vs1 = _mm256_add_pd(vxhh, vyhh); vv = _mm256_sub_pd(vs1, vxhh); ve = _mm256_add_pd( _mm256_sub_pd(vxhh, _mm256_sub_pd(vs1, vv)), _mm256_sub_pd(vyhh, vv)); //twoSum(e,y.hl); vs2 = _mm256_add_pd(ve, vyhl); vv = _mm256_sub_pd(vs2, ve); ve = _mm256_add_pd( _mm256_sub_pd(ve, _mm256_sub_pd(vs2, vv)), _mm256_sub_pd(vyhl, vv)); //twoSum(e,y.lh); vs3 = _mm256_add_pd(ve, vylh); vv = _mm256_sub_pd(vs3, ve); ve = _mm256_add_pd( _mm256_sub_pd(ve, _mm256_sub_pd(vs3, vv)), _mm256_sub_pd(vylh, vv)); //twoSum(e,y.ll); vs4 = _mm256_add_pd(ve, vyll); vv = _mm256_sub_pd(vs4, ve); vs5 = _mm256_add_pd( _mm256_sub_pd(ve, _mm256_sub_pd(vs4, vv)), _mm256_sub_pd(vyll, vv)); //renormalize(s1, s2, s3, s4, s5); vs = _mm256_add_pd(vs4, vs5); vt4 = _mm256_sub_pd(vs5, _mm256_sub_pd(vs, vs4)); vss = _mm256_add_pd(vs3, vs); vt3 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vs3)); vs = _mm256_add_pd(vs2, vss); vt2 = _mm256_sub_pd(vss, _mm256_sub_pd(vs, vs2)); vzhh = _mm256_add_pd(vs1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzhh, vs1)); vs = _mm256_add_pd(vt3, vt4); vt3 = _mm256_sub_pd(vt4, _mm256_sub_pd(vs, vt3)); vss = _mm256_add_pd(vt2, vs); vt2 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vt2)); vzhl = _mm256_add_pd(vt1, vss); vt1 = _mm256_sub_pd(vss, _mm256_sub_pd(vzhl, vt1)); vs = _mm256_add_pd(vt2, vt3); vt2 = _mm256_sub_pd(vt3, _mm256_sub_pd(vs, vt2)); vzlh = _mm256_add_pd(vt1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzlh, vt1)); vzll = _mm256_add_pd(vt1, vt2); *vchh = vzhh; *vchl = vzhl; *vclh = vzlh; *vcll = vzll; } /** * * @fn _d_mul_qd_avx2(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the multiplication of double and quad-double number with AVX2. 
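 *
 * Same algorithm as _d_mul_qd_fma, but the twoProd error terms are computed
 * with an explicit Dekker split (2^27 + 1) since FMA support is not assumed.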
* @param chh,chl,clh,cll output (quad-double precision) * @param xhh input (double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _d_mul_qd_avx2(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d vah,val,vbh,vbl,vsh,ve1,ve2,ve3,ve4,veh,vs1,vs2,vs3,vs4,vs5,vp1,vp2,vp3,vp4,vp5,ve,vv,vs,vss,vt1,vt2,vt3,vt4,vzhh,vzhl,vzlh,vzll; __m256d vcons=_mm256_set_pd(134217729,134217729,134217729,134217729); //mul //[ah, al] = Calc.split(a); vah = _mm256_mul_pd(vcons, vxhh); vah = _mm256_sub_pd(vah, _mm256_sub_pd(vah, vxhh)); val = _mm256_sub_pd(vxhh, vah); //[bh, bl] = Calc.split(b); vbh = _mm256_mul_pd(vcons, vyhh); vbh = _mm256_sub_pd(vbh, _mm256_sub_pd(vbh, vyhh)); vbl = _mm256_sub_pd(vyhh, vbh); //two_prod(a, b.hh); vp1 = _mm256_mul_pd(vxhh, vyhh); ve1 = _mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah, vbh), vp1), _mm256_mul_pd(vah, vbl)), _mm256_mul_pd(val, vbh)), _mm256_mul_pd(val, vbl)); //[bh, bl] = Calc.split(bhl); vbh = _mm256_mul_pd(vcons, vyhl); vbh = _mm256_sub_pd(vbh, _mm256_sub_pd(vbh, vyhl)); vbl = _mm256_sub_pd(vyhl, vbh); //two_prod(a, b.hl); vp2 = _mm256_mul_pd(vxhh, vyhl); ve2 = _mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah, vbh), vp2), _mm256_mul_pd(vah, vbl)), _mm256_mul_pd(val, vbh)), _mm256_mul_pd(val, vbl)); //[bh, bl] = Calc.split(bhl); vbh = _mm256_mul_pd(vcons, vylh); vbh = _mm256_sub_pd(vbh, _mm256_sub_pd(vbh, vylh)); vbl = _mm256_sub_pd(vylh, vbh); //two_prod(a, b.lh); vp3 = _mm256_mul_pd(vxhh, vylh); ve3 = _mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah, vbh), vp3), _mm256_mul_pd(vah, vbl)), _mm256_mul_pd(val, vbh)), _mm256_mul_pd(val, vbl)); vp4 = _mm256_mul_pd(vxhh,vyll); //two_Sum(p2, e1); vs = _mm256_add_pd(vp2, ve1); vv =_mm256_sub_pd(vs, vp2); ve1 =_mm256_add_pd(_mm256_sub_pd(vp2 ,_mm256_sub_pd(vs,vv)),_mm256_sub_pd(ve1,vv)); vp2 = vs; //three_sum(p3, e2, e1); //two_Sum(p3, e2); vs = _mm256_add_pd(vp3, ve2); vv =_mm256_sub_pd(vs, vp3); veh =_mm256_add_pd(_mm256_sub_pd(vp3 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve2,vv)); //two_Sum(s, e1); vp3 = _mm256_add_pd(vs, ve1); vv =_mm256_sub_pd(vp3, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp3,vv)), _mm256_sub_pd(ve1,vv)); //two_Sum(eh, s); ve2 = _mm256_add_pd(veh, vs); vv =_mm256_sub_pd(ve2, veh); ve1 =_mm256_add_pd(_mm256_sub_pd(veh ,_mm256_sub_pd(ve2,vv)), _mm256_sub_pd(vs,vv)); //three_sum2(p4, e3, e2); //two_Sum(p4, e3); vs = _mm256_add_pd(vp4, ve3); vv =_mm256_sub_pd(vs, vp4); veh =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve3,vv)); //two_Sum(s, e2); vp4 = _mm256_add_pd(vs, ve2); vv =_mm256_sub_pd(vp4, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp4,vv)), _mm256_sub_pd(ve2,vv)); ve2 = _mm256_add_pd(veh, vs); vp5 = _mm256_add_pd(ve1, ve2); //renormalize(p1, p2, p3, p4, p5); vs = _mm256_add_pd(vp4, vp5); vt4 = _mm256_sub_pd(vp5, _mm256_sub_pd(vs, vp4)); vss = _mm256_add_pd(vp3, vs); vt3 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vp3)); vs = _mm256_add_pd(vp2, vss); vt2 = _mm256_sub_pd(vss, _mm256_sub_pd(vs, vp2)); vzhh = _mm256_add_pd(vp1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzhh, vp1)); vs = _mm256_add_pd(vt3, vt4); vt3 = _mm256_sub_pd(vt4, _mm256_sub_pd(vs, vt3)); vss = _mm256_add_pd(vt2, vs); vt2 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vt2)); vzhl = _mm256_add_pd(vt1, vss); vt1 = _mm256_sub_pd(vss, _mm256_sub_pd(vzhl, vt1)); vs = _mm256_add_pd(vt2, vt3); vt2 = _mm256_sub_pd(vt3, 
_mm256_sub_pd(vs, vt2)); vzlh = _mm256_add_pd(vt1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzlh, vt1)); vzll = _mm256_add_pd(vt1, vt2); *vchh = vzhh; *vchl = vzhl; *vclh = vzlh; *vcll = vzll; } /** * * @fn _dd_add_dd_avx2(__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the addition of double-double and double-double number with AVX2. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (quad-double precision) * */ void _dd_add_dd_avx2(__m256d* vchi,__m256d* vclo,__m256d vahi,__m256d valo,__m256d vbhi,__m256d vblo) { __m256d vsh,vsl,vv,veh,vel,vchi_n,vclo_n; vsh = _mm256_add_pd(vahi,vbhi); vv = _mm256_sub_pd(vsh, vahi); veh = _mm256_add_pd(_mm256_sub_pd(vahi ,_mm256_sub_pd(vsh, vv)), _mm256_sub_pd(vbhi ,vv)); vsl = _mm256_add_pd(valo , vblo); veh = _mm256_add_pd(veh , vsl); vchi_n = _mm256_add_pd(vsh, veh); vclo_n = _mm256_sub_pd(veh, _mm256_sub_pd(vchi_n ,vsh)); *vchi = vchi_n; *vclo = vclo_n; } /** * * @fn _dd_add_dd_avx2_ieee(__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the addition of double-double and double-double number with AVX2. * @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (quad-double precision) * */ void _dd_add_dd_avx2_ieee(__m256d* vchi,__m256d* vclo,__m256d vahi,__m256d valo,__m256d vbhi,__m256d vblo) { /* Compute double-double = double-double + double-double */ __m256d vs1,vs2,vbv,vt1,vt2,vchi_n,vclo_n; /* Add two high-order words. */ vs1 = _mm256_add_pd(vahi, vbhi); vbv = _mm256_sub_pd(vs1, vahi); vs2 = _mm256_add_pd(_mm256_sub_pd(vahi ,_mm256_sub_pd(vs1, vbv)), _mm256_sub_pd(vbhi ,vbv)); /* Add two low-order words. */ vt1 = _mm256_add_pd(valo, vblo); vbv = _mm256_sub_pd(vt1, valo); vt2 = _mm256_add_pd(_mm256_sub_pd(valo ,_mm256_sub_pd(vt1, vbv)), _mm256_sub_pd(vblo ,vbv)); vs2 = _mm256_add_pd(vs2, vt1); /* Renormalize (s1, s2) to (t1, s2) */ vt1 = _mm256_add_pd(vs1, vs2); vs2 = _mm256_sub_pd(vs2, _mm256_sub_pd(vt1, vs1)); vt2 = _mm256_add_pd(vt2, vs2); /* Renormalize (t1, t2) */ vchi_n = _mm256_add_pd(vt1, vt2); vclo_n = _mm256_sub_pd(vt2, _mm256_sub_pd(vchi_n, vt1)); *vchi = vchi_n; *vclo = vclo_n; } /** * * @fn _dd_mul_dd_avx2(__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the multiplication of double-double and double-double number with AVX2. 
* @param chi,clo output (double-double precision) * @param ahi input (double precision) * @param bhi,blo input (quad-double precision) * */ void _dd_mul_dd_avx2(__m256d* vchi,__m256d* vclo,__m256d vahi,__m256d valo,__m256d vbhi,__m256d vblo) { __m256d vt,vt1,vah,val,vbh,vbl,vp1,vp2,vchi_n,vclo_n; __m256d vcons=_mm256_set_pd(134217729,134217729,134217729,134217729); vt = _mm256_mul_pd(vcons, vahi); vah = _mm256_sub_pd(vt ,_mm256_sub_pd(vt, vahi)); val = _mm256_sub_pd(vahi, vah); vt1 = _mm256_mul_pd(vcons , vbhi); vbh = _mm256_sub_pd(vt1 ,_mm256_sub_pd(vt1, vbhi)); vbl = _mm256_sub_pd(vbhi ,vbh); vp1 = _mm256_mul_pd(vahi ,vbhi); vp2= _mm256_add_pd( _mm256_add_pd( _mm256_add_pd( _mm256_sub_pd( _mm256_mul_pd(vah,vbh),vp1), _mm256_mul_pd(vah,vbl)), _mm256_mul_pd(val,vbh)), _mm256_mul_pd(val,vbl)); vp2 = _mm256_add_pd(vp2,_mm256_mul_pd(vahi,vblo)); vp2 = _mm256_add_pd(vp2,_mm256_mul_pd(valo,vbhi)); vchi_n =_mm256_add_pd(vp1 , vp2); vclo_n = _mm256_sub_pd(vp2, _mm256_sub_pd(vchi_n, vp1)); *vchi = vchi_n; *vclo = vclo_n; } /** * * @fn _dd_add_qd_avx2(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the addition of double-double and quad-double number with AVX2. * @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl input (double-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _dd_add_qd_avx2(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vxhl,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d vs1,vs2,vs3,vs4,vs5,ve1,vu,ve2,ve3,ve4,ve5,vv,vs,vss,vt1,vt2,vt3,vt4,vzhh,vzhl,vzlh,vzll; //two_sumtwoSum(x.hh,y.hh); vs1 = _mm256_add_pd(vxhh, vyhh); vv = _mm256_sub_pd(vs1, vxhh); ve1 = _mm256_add_pd( _mm256_sub_pd(vxhh, _mm256_sub_pd(vs1, vv)), _mm256_sub_pd(vyhh, vv)); //two_sumtwoSum(x.hl,y.hl); vt1 = _mm256_add_pd(vxhl, vyhl); vv = _mm256_sub_pd(vt1, vxhl); ve2 = _mm256_add_pd( _mm256_sub_pd(vxhl, _mm256_sub_pd(vt1, vv)), _mm256_sub_pd(vyhl, vv)); //two_sumtwoSum(t1, e1); vs2 = _mm256_add_pd(vt1, ve1); vv = _mm256_sub_pd(vs2, vt1); vu = _mm256_add_pd( _mm256_sub_pd(vt1, _mm256_sub_pd(vs2, vv)), _mm256_sub_pd(ve1, vv)); //two_sumtwoSum(e2, y.lh); vt2 = _mm256_add_pd(ve2, vylh); vv = _mm256_sub_pd(vt2, ve2); ve3 = _mm256_add_pd( _mm256_sub_pd(ve2, _mm256_sub_pd(vt2, vv)), _mm256_sub_pd(vylh, vv)); //two_sumtwoSum(u, t2); vs3 = _mm256_add_pd(vu, vt2); vv = _mm256_sub_pd(vs3, vu); vu = _mm256_add_pd( _mm256_sub_pd(vu, _mm256_sub_pd(vs3, vv)), _mm256_sub_pd(vt2, vv)); //two_sumtwoSum(u, e3); vt3 = _mm256_add_pd(vu, ve3); vv = _mm256_sub_pd(vt3, vu); ve4 = _mm256_add_pd( _mm256_sub_pd(vu, _mm256_sub_pd(vt3, vv)), _mm256_sub_pd(ve3, vv)); //two_sumtwoSum(t3, y.ll); vs4 = _mm256_add_pd(vt3, vyll); vv = _mm256_sub_pd(vs4, vt3); ve5 = _mm256_add_pd( _mm256_sub_pd(vt3, _mm256_sub_pd(vs4, vv)), _mm256_sub_pd(vyll, vv)); vs5 = _mm256_add_pd(ve4, ve5); //renormalize(s1, s2, s3, s4, s5); vs = _mm256_add_pd(vs4, vs5); vt4 = _mm256_sub_pd(vs5, _mm256_sub_pd(vs, vs4)); vss = _mm256_add_pd(vs3, vs); vt3 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vs3)); vs = _mm256_add_pd(vs2, vss); vt2 = _mm256_sub_pd(vss, _mm256_sub_pd(vs, vs2)); vzhh = _mm256_add_pd(vs1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzhh, vs1)); vs = _mm256_add_pd(vt3, vt4); vt3 = _mm256_sub_pd(vt4, _mm256_sub_pd(vs, vt3)); vss = _mm256_add_pd(vt2, vs); vt2 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vt2)); vzhl = _mm256_add_pd(vt1, vss); vt1 = _mm256_sub_pd(vss, _mm256_sub_pd(vzhl, vt1)); vs = 
_mm256_add_pd(vt2, vt3); vt2 = _mm256_sub_pd(vt3, _mm256_sub_pd(vs, vt2)); vzlh = _mm256_add_pd(vt1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzlh, vt1)); vzll = _mm256_add_pd(vt1, vt2); *vchh = vzhh; *vchl = vzhl; *vclh = vzlh; *vcll = vzll; } /** * * @fn _dd_mul_qd_avx2(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the multiplication of double-double and quad-double number with AVX2. * @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl input (double-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _dd_mul_qd_avx2(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vxhl,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d va1h,va1l,vbh,vbl,va2h,va2l,vsh,ve1,ve2,ve3,ve4,ve5,veh,vs1,vs2,vs3,vs4,vs5,vp1,vp2,vp3,vp4,vp5,ve,vv,vs,vss,vt1,vt2,vt3,vt4,vzhh,vzhl,vzlh,vzll; __m256d vcons=_mm256_set_pd(134217729,134217729,134217729,134217729); //mul //two_prod(a.hh, b.hh); vp1 = _mm256_mul_pd(vxhh, vyhh); //split(a) va1h = _mm256_mul_pd(vcons, vxhh); va1h = _mm256_sub_pd(va1h ,_mm256_sub_pd(va1h, vxhh)); va1l = _mm256_sub_pd(vxhh, va1h); //split(b) vbh = _mm256_mul_pd(vcons, vyhh); vbh = _mm256_sub_pd(vbh ,_mm256_sub_pd(vbh, vyhh)); vbl = _mm256_sub_pd(vyhh, vbh); ve1 = _mm256_add_pd( _mm256_add_pd( _mm256_add_pd( _mm256_sub_pd( _mm256_mul_pd(va1h,vbh),vp1), _mm256_mul_pd(va1h,vbl)), _mm256_mul_pd(va1l,vbh)), _mm256_mul_pd(va1l,vbl)); //two_prod(a.hl, b.hh); vp2 = _mm256_mul_pd(vxhl,vyhh); //split(a) va2h = _mm256_mul_pd(vcons, vxhl); va2h = _mm256_sub_pd(va2h ,_mm256_sub_pd(va2h, vxhl)); va2l = _mm256_sub_pd(vxhl, va2h); ve2 = _mm256_add_pd( _mm256_add_pd( _mm256_add_pd( _mm256_sub_pd( _mm256_mul_pd(va2h,vbh),vp2), _mm256_mul_pd(va2h,vbl)), _mm256_mul_pd(va2l,vbh)), _mm256_mul_pd(va2l,vbl)); //two_prod(a.hh, b.hl); vp3 = _mm256_mul_pd(vxhh,vyhl); //split(b) vbh = _mm256_mul_pd(vcons, vyhl); vbh = _mm256_sub_pd(vbh ,_mm256_sub_pd(vbh, vyhl)); vbl = _mm256_sub_pd(vyhl, vbh); ve3 = _mm256_add_pd( _mm256_add_pd( _mm256_add_pd( _mm256_sub_pd( _mm256_mul_pd(va1h,vbh),vp3), _mm256_mul_pd(va1h,vbl)), _mm256_mul_pd(va1l,vbh)), _mm256_mul_pd(va1l,vbl)); //two_prod(a.hl, b.hl); vp4 = _mm256_mul_pd(vxhl,vyhl); ve4 = _mm256_add_pd( _mm256_add_pd( _mm256_add_pd( _mm256_sub_pd( _mm256_mul_pd(va2h,vbh),vp4), _mm256_mul_pd(va2h,vbl)), _mm256_mul_pd(va2l,vbh)), _mm256_mul_pd(va2l,vbl)); //two_prod(a.hh, b.lh); vp5 = _mm256_mul_pd(vxhh,vylh); //split(b) vbh = _mm256_mul_pd(vcons, vylh); vbh = _mm256_sub_pd(vbh ,_mm256_sub_pd(vbh, vylh)); vbl = _mm256_sub_pd(vylh, vbh); ve5 = _mm256_add_pd( _mm256_add_pd( _mm256_add_pd( _mm256_sub_pd( _mm256_mul_pd(va1h,vbh),vp5), _mm256_mul_pd(va1h,vbl)), _mm256_mul_pd(va1l,vbh)), _mm256_mul_pd(va1l,vbl)); //three_sum(p2, p3, e1); //two_Sum(p2, p3); vs = _mm256_add_pd(vp2, vp3); vv =_mm256_sub_pd(vs, vp2); veh =_mm256_add_pd(_mm256_sub_pd(vp2 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(vp3,vv)); //two_Sum(s, e1); vp2 = _mm256_add_pd(vs, ve1); vv =_mm256_sub_pd(vp2, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp2,vv)), _mm256_sub_pd(ve1,vv)); //two_Sum(eh, s); vp3 = _mm256_add_pd(veh, vs); vv =_mm256_sub_pd(vp3, veh); ve1 =_mm256_add_pd(_mm256_sub_pd(veh ,_mm256_sub_pd(vp3,vv)), _mm256_sub_pd(vs,vv)); //three_sum(p3, p4, p5); //two_Sum(p3, p4); vs = _mm256_add_pd(vp3, vp4); vv =_mm256_sub_pd(vs, vp3); veh =_mm256_add_pd(_mm256_sub_pd(vp3 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(vp4,vv)); //two_Sum(s, p5); vp3 = 
_mm256_add_pd(vs, vp5); vv =_mm256_sub_pd(vp3, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp3,vv)), _mm256_sub_pd(vp5,vv)); //two_Sum(eh, s); vp4 = _mm256_add_pd(veh, vs); vv =_mm256_sub_pd(vp4, veh); vp5 =_mm256_add_pd(_mm256_sub_pd(veh ,_mm256_sub_pd(vp4,vv)), _mm256_sub_pd(vs,vv)); //two_Sum(e2, e3); vs = _mm256_add_pd(ve2, ve3); vv =_mm256_sub_pd(vs, ve2); ve3 =_mm256_add_pd(_mm256_sub_pd(ve2 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve3,vv)); ve2 = vs; //two_Sum(p3, e2); vs = _mm256_add_pd(vp3, ve2); vv =_mm256_sub_pd(vs, vp3); ve2 =_mm256_add_pd(_mm256_sub_pd(vp3 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve2,vv)); vp3 = vs; //two_Sum(p4, e3); vs = _mm256_add_pd(vp4, ve3); vv =_mm256_sub_pd(vs, vp4); ve3 =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve3,vv)); vp4 = vs; //two_Sum(p4, e2); vs = _mm256_add_pd(vp4, ve2); vv =_mm256_sub_pd(vs, vp4); ve2 =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve2,vv)); vp4 = vs; vp5 = _mm256_add_pd(_mm256_add_pd(vp5, ve2),ve3); vss =_mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(vxhl,vylh),ve4),_mm256_add_pd(_mm256_mul_pd(vxhh,vyll),ve5)); //three_sum2(p4, e1, ss); //two_Sum(p4, e1); vs = _mm256_add_pd(vp4, ve1); vv =_mm256_sub_pd(vs, vp4); veh =_mm256_add_pd(_mm256_sub_pd(vp4 ,_mm256_sub_pd(vs,vv)), _mm256_sub_pd(ve1,vv)); //two_Sum(s, e2); vp4 = _mm256_add_pd(vs, vss); vv =_mm256_sub_pd(vp4, vs); vs =_mm256_add_pd(_mm256_sub_pd(vs ,_mm256_sub_pd(vp4,vv)), _mm256_sub_pd(vss,vv)); ve1 = _mm256_add_pd(veh, vs); vp5 = _mm256_add_pd(vp5, ve1); //renormalize(p1, p2, p3, p4, p5); vs = _mm256_add_pd(vp4, vp5); vt4 = _mm256_sub_pd(vp5, _mm256_sub_pd(vs, vp4)); vss = _mm256_add_pd(vp3, vs); vt3 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vp3)); vs = _mm256_add_pd(vp2, vss); vt2 = _mm256_sub_pd(vss, _mm256_sub_pd(vs, vp2)); vzhh = _mm256_add_pd(vp1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzhh, vp1)); vs = _mm256_add_pd(vt3, vt4); vt3 = _mm256_sub_pd(vt4, _mm256_sub_pd(vs, vt3)); vss = _mm256_add_pd(vt2, vs); vt2 = _mm256_sub_pd(vs, _mm256_sub_pd(vss, vt2)); vzhl = _mm256_add_pd(vt1, vss); vt1 = _mm256_sub_pd(vss, _mm256_sub_pd(vzhl, vt1)); vs = _mm256_add_pd(vt2, vt3); vt2 = _mm256_sub_pd(vt3, _mm256_sub_pd(vs, vt2)); vzlh = _mm256_add_pd(vt1, vs); vt1 = _mm256_sub_pd(vs, _mm256_sub_pd(vzlh, vt1)); vzll = _mm256_add_pd(vt1, vt2); *vchh = vzhh; *vchl = vzhl; *vclh = vzlh; *vcll = vzll; } /** * * @fn _qd_add_qd_avx2(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the addition of quad-double and quad-double number with AVX2. * @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl,xlh,xll input (quad-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _qd_add_qd_avx2(__m256d* vzhh,__m256d* vzhl,__m256d* vzlh,__m256d* vzll,__m256d vxhh,__m256d vxhl,__m256d vxlh,__m256d vxll,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d vchh,vchl,vclh,vcll,e1,e2,e3,e4,v,sh,eh,t1,t2,t3,t4; //two_sum(&c0, &e1, a0, b0); vchh = _mm256_add_pd(vxhh, vyhh); v = _mm256_sub_pd(vchh, vxhh); e1 = _mm256_add_pd(_mm256_sub_pd(vxhh, _mm256_sub_pd(vchh, v)), _mm256_sub_pd(vyhh, v)); //two_sum(&c1, &e2, a1, b1); vchl = _mm256_add_pd(vxhl, vyhl); v = _mm256_sub_pd(vchl, vxhl); e2 = _mm256_add_pd(_mm256_sub_pd(vxhl, _mm256_sub_pd(vchl, v)), _mm256_sub_pd(vyhl, v)); //two_sum(&c1, &e1, c1, e1); e3 = _mm256_add_pd(vchl, e1); v = _mm256_sub_pd(e3, vchl); // check!!! 
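	//completes two_sum(&c1, &e1, c1, e1): e3 becomes the new c1 and e1 carries
	//the rounding error, mirroring the same step in the serial _qd_add_qd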
e1 = _mm256_add_pd(_mm256_sub_pd(vchl, _mm256_sub_pd(e3, v)), _mm256_sub_pd(e1, v)); vchl=e3; //two_sum(&c2, &e3, a2, b2); vclh = _mm256_add_pd(vxlh, vylh); v = _mm256_sub_pd(vclh, vxlh); e3 = _mm256_add_pd(_mm256_sub_pd(vxlh, _mm256_sub_pd(vclh, v)), _mm256_sub_pd(vylh, v)); //three_sum(&c2, &e1, &e2, c2, e2, e1); sh = _mm256_add_pd(vclh, e2); v = _mm256_sub_pd(sh, vclh); eh = _mm256_add_pd(_mm256_sub_pd(vclh, _mm256_sub_pd(sh, v)), _mm256_sub_pd(e2, v)); vclh = _mm256_add_pd(sh, e1); v = _mm256_sub_pd(vclh, sh); sh = _mm256_add_pd(_mm256_sub_pd(sh, _mm256_sub_pd(vclh, v)), _mm256_sub_pd(e1, v)); e1 = _mm256_add_pd(eh, sh); v = _mm256_sub_pd(e1, eh); e2 = _mm256_add_pd(_mm256_sub_pd(eh, _mm256_sub_pd(e1, v)), _mm256_sub_pd(sh, v)); //two_sum(&c3, &e4, a3, b3); vcll = _mm256_add_pd(vxll, vyll); v = _mm256_sub_pd(vcll, vxll); e4 = _mm256_add_pd(_mm256_sub_pd(vxll, _mm256_sub_pd(vcll, v)), _mm256_sub_pd(vyll, v)); //three_sum2(&c3, &e1, c3, e3, e1); sh = _mm256_add_pd(vcll, e3); v = _mm256_sub_pd(sh, vcll); eh = _mm256_add_pd(_mm256_sub_pd(vcll, _mm256_sub_pd(sh, v)), _mm256_sub_pd(e3, v)); vcll = _mm256_add_pd(sh, e1); v = _mm256_sub_pd(vcll, sh); e1 = _mm256_add_pd(eh, _mm256_add_pd(_mm256_sub_pd(sh, _mm256_sub_pd(vcll,v)), _mm256_sub_pd(e1,v))); e1 = _mm256_add_pd(_mm256_add_pd(e1, e2), e4); eh=vchh; e2=vchl; e3=vclh; e4=vcll; // renormalize(&c0, &c1, &c2, &c3, c0, c1, c2, c3, e1); sh = _mm256_add_pd(e4, e1); t4 = _mm256_sub_pd(e1, _mm256_sub_pd(sh, e4)); v = _mm256_add_pd(e3, sh); t3 = _mm256_sub_pd(sh, _mm256_sub_pd(v, e3)); sh = _mm256_add_pd(e2, v); t2 = _mm256_sub_pd(v, _mm256_sub_pd(sh, e2)); vchh = _mm256_add_pd(eh, sh); t1 = _mm256_sub_pd(sh, _mm256_sub_pd(vchh, eh)); sh = _mm256_add_pd(t3, t4); t3 = _mm256_sub_pd(t4, _mm256_sub_pd(sh, t3)); v = _mm256_add_pd(t2, sh); t2 = _mm256_sub_pd(sh, _mm256_sub_pd(v, t2)); vchl = _mm256_add_pd(t1, v); t1 = _mm256_sub_pd(v, _mm256_sub_pd(vchl, t1)); sh = _mm256_add_pd(t2, t3); t2 = _mm256_sub_pd(t3, _mm256_sub_pd(sh, t2)); vclh = _mm256_add_pd(t1, sh); t1 = _mm256_sub_pd(sh, _mm256_sub_pd(vclh, t1)); vcll = _mm256_add_pd(t1, t2); *vzhh = vchh; *vzhl = vchl; *vzlh = vclh; *vzll = vcll; } /** * * @fn _qd_mul_qd_avx2(__m256d*,__m256d*,__m256d*,__m256d*,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d,__m256d) * @brief The function compute the multiplication of quad-double and quad-double number with AVX2. 
* @param chh,chl,clh,cll output (quad-double precision) * @param xhh,xhl,xlh,xll input (quad-double precision) * @param yhh,yhl,ylh,yll input (quad-double precision) * */ void _qd_mul_qd_avx2(__m256d* vchh,__m256d* vchl,__m256d* vclh,__m256d* vcll,__m256d vxhh,__m256d vxhl,__m256d vxlh,__m256d vxll,__m256d vyhh,__m256d vyhl,__m256d vylh,__m256d vyll) { __m256d vzhh,vzhl,vzlh,vzll,vp01,vp10,vp11,vp02,vp20,vah00,val00,vbh00,vbl00,vah01,val01,vbh01,vbl01,vah10,val10, vbh10,vbl10,vah11,val11,vbh11,vbl11,vah20,val20,vbh20,vbl20,vah02,val02,vbh02,vbl02; __m256d vcons=_mm256_set_pd(134217729,134217729,134217729,134217729); //two_prod(&c0[0], &al00, a0, b0); vzhh = _mm256_mul_pd(vxhh, vyhh); //[ah, al] = Calc.split(a); vah00 = _mm256_mul_pd(vcons, vxhh); vah00 = _mm256_sub_pd(vah00, _mm256_sub_pd(vah00, vxhh)); val00 = _mm256_sub_pd(vxhh, vah00); //[bh, bl] = Calc.split(b); vbh00 = _mm256_mul_pd(vcons, vyhh); vbh00 = _mm256_sub_pd(vbh00, _mm256_sub_pd(vbh00, vyhh)); vbl00 = _mm256_sub_pd(vyhh, vbh00); // no fma val00 = _mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah00, vbh00), vzhh), _mm256_mul_pd(vah00, vbl00)), _mm256_mul_pd(val00, vbh00)), _mm256_mul_pd(val00, vbl00)); //O(eps) terms //two_prod(&p01, &bl00, a0, b1); vp01=_mm256_mul_pd(vxhh,vyhl); //split vah01= _mm256_mul_pd(vcons, vxhh); vah01=_mm256_sub_pd(vah01,_mm256_sub_pd(vah01,vxhh)); val01=_mm256_sub_pd(vxhh,vah01); //split vbh01= _mm256_mul_pd(vcons, vyhl); vbh01=_mm256_sub_pd(vbh01,_mm256_sub_pd(vbh01,vyhl)); vbl01=_mm256_sub_pd(vyhl,vbh01); //no fma vbl00=_mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah01, vbh01), vp01), _mm256_mul_pd(vah01, vbl01)), _mm256_mul_pd(val01, vbh01)), _mm256_mul_pd(val01, vbl01)); //two_prod(&p10, &al10, a1, b0); vp10=_mm256_mul_pd(vxhl,vyhh); //split vah10= _mm256_mul_pd(vcons, vxhl); vah10=_mm256_sub_pd(vah10,_mm256_sub_pd(vah10,vxhl)); val10=_mm256_sub_pd(vxhl,vah10); //split vbh10= _mm256_mul_pd(vcons, vyhh); vbh10= _mm256_sub_pd(vbh10,_mm256_sub_pd(vbh10,vyhh)); vbl10= _mm256_sub_pd(vyhh,vbh10); //no fma val10=_mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah10, vbh10), vp10), _mm256_mul_pd(vah10, vbl10)), _mm256_mul_pd(val10, vbh10)), _mm256_mul_pd(val10, vbl10)); //three_sum(&c1[0], &p10, &p01, p01, p10, al00); // [ah00, bh00] = Calc.twoSum(p01, p10); vah00=_mm256_add_pd(vp01,vp10); val01=_mm256_sub_pd(vah00,vp01); vbh00=_mm256_add_pd(_mm256_sub_pd(vp01,_mm256_sub_pd(vah00,val01)),_mm256_sub_pd(vp10,val01)); // [zhl, ah00] = Calc.twoSum(sh, al00); vzhl=_mm256_add_pd(vah00,val00); val01=_mm256_sub_pd(vzhl,vah00); vah00=_mm256_add_pd(_mm256_sub_pd(vah00,_mm256_sub_pd(vzhl,val01)),_mm256_sub_pd(val00,val01)); // [p10, p01] = Calc.twoSum(eh, sh); vp10=_mm256_add_pd(vbh00,vah00); val01=_mm256_sub_pd(vp10,vbh00); vp01=_mm256_add_pd(_mm256_sub_pd(vbh00,_mm256_sub_pd(vp10,val01)),_mm256_sub_pd(vah00,val01)); //--------------------------------------------------- //O(eps^2) terms //two_prod(&p02, &ah02, a0, b2); vp02=_mm256_mul_pd(vxhh,vylh); //split vah02= _mm256_mul_pd(vcons, vxhh); vah02=_mm256_sub_pd(vah02,_mm256_sub_pd(vah02,vxhh)); val02=_mm256_sub_pd(vxhh,vah02); //split vbh02= _mm256_mul_pd(vcons, vylh); vbh02=_mm256_sub_pd(vbh02,_mm256_sub_pd(vbh02,vylh)); vbl02=_mm256_sub_pd(vylh,vbh02); // no fma vah02 = _mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah02, vbh02), vp02), _mm256_mul_pd(vah02, vbl02)), _mm256_mul_pd(val02, vbh02)), _mm256_mul_pd(val02, vbl02)); //two_prod(&p11, &bl11, a1, b1); 
vp11=_mm256_mul_pd(vxhl,vyhl); //split vah11= _mm256_mul_pd(vcons, vxhl); vah11=_mm256_sub_pd(vah11,_mm256_sub_pd(vah11,vxhl)); val11=_mm256_sub_pd(vxhl, vah11); //split vbh11= _mm256_mul_pd(vcons, vyhl); vbh11=_mm256_sub_pd(vbh11,_mm256_sub_pd(vbh11,vyhl)); vbl11=_mm256_sub_pd(vyhl,vbh11); // no fma vbl11 = _mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah11, vbh11), vp11), _mm256_mul_pd(vah11, vbl11)), _mm256_mul_pd(val11, vbh11)), _mm256_mul_pd(val11, vbl11)); //two_prod(&p20, &al11, a2, b0); vp20=_mm256_mul_pd(vxlh,vyhh); //split vah20= _mm256_mul_pd(vcons, vxlh); vah20=_mm256_sub_pd(vah20,_mm256_sub_pd(vah20,vxlh)); val20=_mm256_sub_pd(vxlh,vah20); //split vbh20=_mm256_mul_pd(vcons, vyhh); vbh20=_mm256_sub_pd(vbh20, _mm256_sub_pd(vbh20, vyhh)); vbl20=_mm256_sub_pd(vyhh, vbh20); // no fma val11 = _mm256_add_pd(_mm256_add_pd(_mm256_add_pd(_mm256_sub_pd( _mm256_mul_pd(vah20, vbh20), vp20), _mm256_mul_pd(vah20, vbl20)), _mm256_mul_pd(val20, vbh20)), _mm256_mul_pd(val20, vbl20)); //six three sum for p10, bl00, al10, p02, p11, p20 //three_sum(&p10, &bl00, &al10, p10, bl00, al10); // [ah20, bh20] = Calc.twoSum(p10, bl00); vah20=_mm256_add_pd(vp10,vbl00); val20=_mm256_sub_pd(vah20,vp10); vbh20=_mm256_add_pd(_mm256_sub_pd(vp10,_mm256_sub_pd(vah20,val20)),_mm256_sub_pd(vbl00,val20)); // [p10, ah20] = Calc.twoSum(ah20, al10); vp10=_mm256_add_pd(vah20,val10); val20=_mm256_sub_pd(vp10,vah20); vah20=_mm256_add_pd(_mm256_sub_pd(vah20,_mm256_sub_pd(vp10,val20)),_mm256_sub_pd(val10,val20)); // [bl00, al10] = Calc.twoSum(bh20, ah20); vbl00=_mm256_add_pd(vbh20,vah20); val20=_mm256_sub_pd(vbl00,vbh20); val10=_mm256_add_pd(_mm256_sub_pd(vbh20,_mm256_sub_pd(vbl00,val20)),_mm256_sub_pd(vah20,val20)); //three_sum(&p02, &p11, &p20, p02, p11, p20); // [ah11, bh02] = Calc.twoSum(p02, p11); vah11=_mm256_add_pd(vp02,vp11); val02=_mm256_sub_pd(vah11,vp02); vbh02=_mm256_add_pd(_mm256_sub_pd(vp02,_mm256_sub_pd(vah11,val02)),_mm256_sub_pd(vp11,val02)); // [p02, ah11] = Calc.twoSum(ah11, p20); vp02=_mm256_add_pd(vah11,vp20); val02=_mm256_sub_pd(vp02,vah11); vah11= _mm256_add_pd(_mm256_sub_pd(vah11,_mm256_sub_pd(vp02,val02)),_mm256_sub_pd(vp20,val02)); // [p11, p20] = Calc.twoSum(bh02, ah11); vp11=_mm256_add_pd(vbh02,vah11); val02=_mm256_sub_pd(vp11,vbh02); vp20=_mm256_add_pd(_mm256_sub_pd(vbh02,_mm256_sub_pd(vp11,val02)),_mm256_sub_pd(vah11,val02)); // two_sum(&c2[0], &p10, p02, p10); vzlh = _mm256_add_pd(vp02 , vp10); vbl02 = _mm256_sub_pd(vzlh , vp02); vp10 = _mm256_add_pd(_mm256_sub_pd(vp02 , _mm256_sub_pd(vzlh , vbl02)) , _mm256_sub_pd(vp10 , vbl02)); //two_sum(&p11, &bl00, p11, bl00); vbl20=vp11; vp11 = _mm256_add_pd(vbl20 , vbl00); vbl01 = _mm256_sub_pd(vp11 , vbl20); vbl00 = _mm256_add_pd(_mm256_sub_pd(vbl20 , _mm256_sub_pd(vp11 , vbl01)) , _mm256_sub_pd(vbl00 , vbl01)); //two_sum(&p10, &p11, p10, p11); vbl01=vp10; vp10 = _mm256_add_pd(vbl01 , vp11); vbl20 = _mm256_sub_pd(vp10 , vbl01); vp11 = _mm256_add_pd(_mm256_sub_pd(vbl01 , _mm256_sub_pd(vp10 , vbl20)) , _mm256_sub_pd(vp11 , vbl20)); //O(eps^4) terms val10 = _mm256_add_pd(_mm256_add_pd(val10 , vp20) , _mm256_add_pd(vbl00 , vp11)); //no fma //O(eps^3) terms vzll =_mm256_add_pd( _mm256_add_pd( _mm256_add_pd(vp10, _mm256_add_pd( _mm256_add_pd( _mm256_add_pd( _mm256_mul_pd(vxhh,vyll), _mm256_mul_pd(vxhl,vylh)), _mm256_mul_pd(vxlh,vyhl)), _mm256_mul_pd(vxll,vyhh))), _mm256_add_pd(vah02,vbl11)),val11); val00=vzhh; vbl00=vzhl; vah02=vzlh; val11=vzll; //renormalize(&c0[0], &c1[0], &c2[0], &c3[0], al00,bl00,ah02,al11, al10); vbl02 =
_mm256_add_pd(val11 , val10); vbl01 = _mm256_sub_pd(val10 , _mm256_sub_pd(vbl02, val11)); vbh02 = _mm256_add_pd(vah02 , vbl02); vbh01 = _mm256_sub_pd(vbl02 , _mm256_sub_pd(vbh02 , vah02)); vbl02 = _mm256_add_pd(vbl00 , vbh02); val01 = _mm256_sub_pd(vbh02, _mm256_sub_pd(vbl02 , vbl00)); vzhh = _mm256_add_pd(val00 , vbl02); vah01 = _mm256_sub_pd(vbl02 , _mm256_sub_pd(vzhh , val00)); vbl02 = _mm256_add_pd(vbh01 , vbl01); vbh01 = _mm256_sub_pd(vbl01 ,_mm256_sub_pd(vbl02 , vbh01)); vbh02 = _mm256_add_pd(val01 , vbl02); val01 = _mm256_sub_pd(vbl02 , _mm256_sub_pd(vbh02 ,val01)); vzhl = _mm256_add_pd(vah01 , vbh02); vah01 = _mm256_sub_pd(vbh02 , _mm256_sub_pd(vzhl, vah01)); vbl02 = _mm256_add_pd(val01 , vbh01); val01 = _mm256_sub_pd(vbh01 , _mm256_sub_pd(vbl02 ,val01)); vzlh = _mm256_add_pd(vah01 , vbl02); vah01 = _mm256_sub_pd(vbl02 , _mm256_sub_pd(vzlh , vah01)); vzll = _mm256_add_pd(vah01 , val01); *vchh = vzhh; *vchl = vzhl; *vclh = vzlh; *vcll = vzll; }
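/*
 * Hedged illustrative sketch (not part of the original source): the vector
 * code above is a hand-unrolled AVX2 version of the classic error-free
 * transformations two_sum / split / two_prod (Dekker/Knuth). The scalar
 * reference below uses hypothetical helper names and shows the exact
 * operation order the intrinsic sequences follow; the splitter constant
 * 134217729 = 2^27 + 1 matches the vcons vector used above.
 */
static inline void two_sum_ref(double a, double b, double *s, double *e)
{
    double v;
    *s = a + b;                      /* rounded sum */
    v = *s - a;
    *e = (a - (*s - v)) + (b - v);   /* exact rounding error of a + b */
}

static inline void split_ref(double a, double *hi, double *lo)
{
    double t = 134217729.0 * a;      /* Dekker splitter, 2^27 + 1 */
    *hi = t - (t - a);               /* upper half of the mantissa */
    *lo = a - *hi;                   /* remaining lower bits */
}

static inline void two_prod_ref(double a, double b, double *p, double *e)
{
    double ah, al, bh, bl;
    *p = a * b;                      /* rounded product */
    split_ref(a, &ah, &al);
    split_ref(b, &bh, &bl);
    /* "no fma" error term, same association as the vector code */
    *e = ((ah * bh - *p) + ah * bl + al * bh) + al * bl;
}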
DRB018-plusplus-orig-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISO/IEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S.
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Data race on outLen due to the ++ operation. Adding private(outLen) would avoid the race condition, but it is semantically wrong. The races on outLen also make the writes to output[outLen++] race. Data race pairs (we allow two pairs to preserve the original code pattern): 1. outLen@72 vs. outLen@72 2. output[]@72 vs. output[]@72 */ #include <stdlib.h> #include <stdio.h> int input[1000]; int output[1000]; int main() { int i; int inLen = 1000; int outLen = 0; int _ret_val_0; #pragma cetus private(i) #pragma loop name main#0 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<inLen; ++ i) { input[i]=i; } #pragma cetus private(i) #pragma loop name main#1 for (i=0; i<inLen; ++ i) { output[outLen ++ ]=input[i]; } printf("output[500]=%d\n", output[500]); _ret_val_0=0; return _ret_val_0; }
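/*
 * Hedged illustrative sketch (not part of the original benchmark): because
 * every iteration of the racy loop appends exactly one element, a race-free
 * parallel variant can derive the output index from the loop variable
 * instead of the shared outLen counter. The function name raceFreeCopy is
 * hypothetical; it reuses the file's input/output globals.
 */
int raceFreeCopy(void)
{
    int i;
#pragma omp parallel for private(i)
    for (i = 0; i < 1000; ++i) {
        output[i] = input[i]; /* index derived from i, no shared counter */
    }
    return 1000; /* the final outLen is known statically here */
}
/* Note: this only works because the copy is unconditional; a filtering loop
 * would instead need a reduction or a prefix sum over the kept counts. */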
cameraFast.h
#pragma once #include "accelerationStructure/fastNodeManager.h" #include "camera.h" #include "glmInclude.h" #include "timing.h" #include "cacheSimulator.h" #include "util.h" #include "fastRay.h" #include <iostream>// writing on a text file #include <fstream> #include <algorithm> #include <vector> #include <execution>//for the parallel for #include <numeric>// std::accumulate #include <omp.h>//openMp template <unsigned gangSize, unsigned nodeMemory, unsigned workGroupSize > class FastNodeManager; #define perRayCacheAnalysis false //Fast version of camera that does no intersection counters but is only for performance analysis class CameraFast : public Camera { private: std::vector<std::chrono::nanoseconds> timesRay; std::vector<std::chrono::nanoseconds> timesTriangles; std::string problemPrefix; bool saveDistance; bool wideRender; public: //only black and white image for now -> image is only height * width and not height * width * 4 CameraFast(std::string path, std::string name, std::string problem, std::string problemPrefix, int nonTemplateWorkGroupSize, bool saveDistance, bool wideRender, std::vector<glm::vec3>& positions, std::vector<glm::vec3>& lookCenters, size_t width = 1920, size_t height = 1088, glm::vec3 upward = glm::vec3(0, 1, 0), float focalLength = 0.866f); CameraFast(std::string path, std::string name, std::string problem, std::string problemPrefix, int nonTemplateWorkGroupSize, bool saveDistance, bool wideRender, std::vector<glm::mat4>& transforms, size_t width = 1920, size_t height = 1088, float focalLength = 0.866f); //renders 6 images; we take the median of the last 5 renders template <unsigned gangSize, unsigned nodeMemory, unsigned workGroupSize> void renderImages(const bool saveImage, FastNodeManager<gangSize, nodeMemory, workGroupSize>& nodeManager, const unsigned ambientSampleCount, const float ambientDistance, const bool mute, const bool wideAlternative, bool saveRayTimes, bool doCacheAnalysis) { int cameraCount = positions.size(); //all values we store: float totalTime = 0; float timeSumEachRay = 0; float timeSumTriangle = 0; //has to be an odd number for correct median int sampleCount = 5; //total raytracer time, time sum each ray, time sum triangle cost std::vector<std::tuple<float, float, float>> results; results.reserve(sampleCount); if (doCacheAnalysis) { #if perRayCacheAnalysis if (wideRender) { std::cerr << "Stopped wide renderer for per ray cache analysis." << std::endl; return; } #endif for (int cameraId = 0; cameraId < cameraCount; cameraId++) { nodeManager.cache.resetEverything(); renderImage<gangSize, nodeMemory, workGroupSize, true>(saveImage, nodeManager, ambientSampleCount, ambientDistance, cameraId, wideAlternative, false); } return; } //image render: if (saveImage) { for (int cameraId = 0; cameraId < cameraCount; cameraId++) { //if the image should be saved, do one first run to save it; this also makes sure we still do one render that we throw away before the performance measurements renderImage<gangSize, nodeMemory, workGroupSize, false>(saveImage, nodeManager, ambientSampleCount, ambientDistance, cameraId, wideAlternative, false); } } //loop over the different camera positions for (int cameraId = 0; cameraId < cameraCount; cameraId++) { //"first" render to load everything in cache renderImage<gangSize, nodeMemory, workGroupSize, false>(false, nodeManager, ambientSampleCount, ambientDistance, cameraId, wideAlternative, false); //median (of the overall time, or of each individual time?)
for (int i = 0; i < sampleCount; i++) { results.push_back(renderImage<gangSize, nodeMemory, workGroupSize, false>(false, nodeManager, ambientSampleCount, ambientDistance, cameraId, wideAlternative, false)); } //calculate median //this sorts by the first value of the tuple (the total raytracer time) sort(results.begin(), results.end()); auto median = results[sampleCount / 2]; totalTime += std::get<0>(median); timeSumEachRay += std::get<1>(median); timeSumTriangle += std::get<2>(median); results.clear(); } //doDetailedTimings: if (saveRayTimes) { for (int cameraId = 0; cameraId < cameraCount; cameraId++) { renderImage<gangSize, nodeMemory, workGroupSize, false>(false, nodeManager, ambientSampleCount, ambientDistance, cameraId, wideAlternative, true); } } totalTime /= (float)cameraCount; timeSumEachRay /= (float)cameraCount; timeSumTriangle /= (float)cameraCount; //output if (!mute) { std::cout << "Raytracing took " << totalTime << " seconds." << std::endl; std::cout << "ray sum took " << timeSumEachRay << " seconds." << std::endl; std::cout << "triangleTests took " << timeSumTriangle << " seconds." << std::endl; std::cout << "all rays minus triangle took " << timeSumEachRay - timeSumTriangle << " seconds." << std::endl; } std::string workGroupName = "/WorkGroupSize_" + std::to_string(workGroupSize) + "_Normal"; if (wideRender) { if (wideAlternative == 0) { workGroupName = "/WorkGroupSize_" + std::to_string(workGroupSize) + "_WideOld"; } else { workGroupName = "/WorkGroupSize_" + std::to_string(workGroupSize) + "_Wide"; } } std::ofstream myfile(path + workGroupName + "/" + name + problem + problemPrefix + "_Perf.txt"); if (myfile.is_open()) { myfile << "scenario " << name << " with branching factor of " << nodeManager.branchingFactor << " and leafsize of " << nodeManager.leafSize << std::endl; myfile << "with a gang size of " << gangSize << ", a node memory of " << nodeMemory << ", and a leaf memory of " << nodeManager.leafMemory << std::endl; myfile << "Raytracer total time: " << totalTime << std::endl; myfile << "Time for all rays (SUM): " << timeSumEachRay << std::endl; myfile << "Time for triangle intersections (SUM): " << timeSumTriangle << std::endl; myfile << "Time all rays(sum) - triangle(sum): " << timeSumEachRay - timeSumTriangle << std::endl; //add some extra information needed for final analysis (tri count and average tree depth) myfile << "Average BVH depth: " << nodeManager.averageBvhDepth << std::endl; myfile << "Triangle Count: " << nodeManager.triangleCount << std::endl; } else { std::cerr << "unable to open perf output file!"
<< std::endl; } myfile.close(); } template <unsigned gangSize, unsigned nodeMemory, unsigned workGroupSize, bool doCache> std::tuple<float, float, float> renderImage(const bool saveImage, FastNodeManager<gangSize, nodeMemory, workGroupSize>& nodeManager, const unsigned ambientSampleCount, const float ambientDistance, int cameraId, bool wideAlternative, bool saveRayTimes) { //fillRenderInfo(); auto timeBeginRaytracer = getTime(); bool wideRefill = false; //similar performance compared to openMp versions, but openMp is more consistent (large difference) // std::for_each(std::execution::par_unseq, renderInfos.begin(), renderInfos.end(), // [&](auto& info) //dynamic 4 is the most consistent but the difference is not that large (tests were done with the exe and Windows realtime mode, 30 samples) //This result is good because dynamic is the schedule best suited for our problem, since every ray can take a different amount of time //#pragma omp parallel for schedule(dynamic, 4) //helper struct for per ray cache analysis struct CachePerRayResultStorage { std::vector<std::vector<int>>stackCacheLoads; std::vector<std::vector<int>>stackCacheHits; std::vector<std::vector<int>>heapCacheLoads; std::vector<std::vector<int>>heapCacheHits; void initialize() { int threadCount = omp_get_max_threads(); stackCacheLoads.resize(threadCount); stackCacheHits.resize(threadCount); heapCacheLoads.resize(threadCount); heapCacheHits.resize(threadCount); for (int i = 0; i < threadCount; i++) { stackCacheLoads[i].resize(workGroupSquare, 0); stackCacheHits[i].resize(workGroupSquare, 0); heapCacheLoads[i].resize(workGroupSquare, 0); heapCacheHits[i].resize(workGroupSquare, 0); } } //updates local counters and resets the counter of the cache. ONLY for current thread void updateAndResetCounterThread(CacheSimulator& cache, int rayId) { //read the thread cache info after each ray int tId = omp_get_thread_num(); stackCacheLoads[tId][rayId] += cache.stackCacheLoads[tId]; stackCacheHits[tId][rayId] += cache.stackCacheHits[tId]; heapCacheLoads[tId][rayId] += cache.heapCacheLoads[tId]; heapCacheHits[tId][rayId] += cache.heapCacheHits[tId]; //reset counter of this thread after each ray cache.resetThisThreadCounter(); } std::string outputLinePerRay(int rayId, int threadCount, int workGroupCount) { std::string result; float stackLoadSum = 0; float stackMissSum = 0; float heapLoadSum = 0; float heapMissSum = 0; for (int i = 0; i < threadCount; i++) { stackLoadSum += stackCacheLoads[i][rayId]; stackMissSum += stackCacheLoads[i][rayId] - stackCacheHits[i][rayId]; heapLoadSum += heapCacheLoads[i][rayId]; heapMissSum += heapCacheLoads[i][rayId] - heapCacheHits[i][rayId]; } //divided by the number of workgroups (each workgroup contributes one ray for each ray id) int divisor = workGroupCount; stackLoadSum /= (float)divisor; stackMissSum /= (float)divisor; heapLoadSum /= (float)divisor; heapMissSum /= (float)divisor; result = std::to_string(stackLoadSum) + ", " + std::to_string(stackMissSum) + ", " + std::to_string(heapLoadSum) + ", " + std::to_string(heapMissSum); return result; } }; struct CacheResultStorage { //create per-workgroup storage for hits and loads std::vector<uint64_t> stackCacheLoads; std::vector<uint64_t> stackCacheHits; std::vector<uint64_t> heapCacheLoads; std::vector<uint64_t> heapCacheHits; void initialize(int size) { stackCacheLoads.resize(size, 0); stackCacheHits.resize(size, 0); heapCacheLoads.resize(size, 0); heapCacheHits.resize(size, 0); } void updateAndResetCounterThread(CacheSimulator& cache, int workGroupId) { //read the thread
cache info after each ray int tId = omp_get_thread_num(); stackCacheLoads[workGroupId] += cache.stackCacheLoads[tId]; stackCacheHits[workGroupId] += cache.stackCacheHits[tId]; heapCacheLoads[workGroupId] += cache.heapCacheLoads[tId]; heapCacheHits[workGroupId] += cache.heapCacheHits[tId]; //reset counter of this thread after each ray cache.resetThisThreadCounter(); } std::string outputLinePerWorkGroup(int workGroupId) { std::string result; result = std::to_string(stackCacheLoads[workGroupId]) + ", " + std::to_string(stackCacheLoads[workGroupId] - stackCacheHits[workGroupId]) + ", " + std::to_string(heapCacheLoads[workGroupId]) + ", " + std::to_string(heapCacheLoads[workGroupId] - heapCacheHits[workGroupId]); return result; } }; #if perRayCacheAnalysis CachePerRayResultStorage cachePrimaryResult; CachePerRayResultStorage cacheSecondaryResult; if constexpr (doCache) { cachePrimaryResult.initialize(); cacheSecondaryResult.initialize(); } #else //create per-workgroup storage for hits and loads CacheResultStorage cachePrimaryResult; CacheResultStorage cacheSecondaryResult; if constexpr (doCache) { int workGroupCount = (width * height) / workGroupSquare; cachePrimaryResult.initialize(workGroupCount); cacheSecondaryResult.initialize(workGroupCount); } #endif if (!wideRender) { #pragma omp parallel for schedule(dynamic, 4) for (int i = 0; i < (width / workGroupSize) * (height / workGroupSize); i++) { #if perRayCacheAnalysis if constexpr (doCache) { //Full reset of cache at beginning of workgroup (for per ray analysis) nodeManager.cache.resetThisThread(); } #endif //data to do secondary rays: (first the ray, second the surface normal (NAN if no hit)) std::array<std::pair<FastRay, glm::vec3>, workGroupSquare> secondaryRayData; for (int j = 0; j < workGroupSquare; j++) { RenderInfo info( ((i * workGroupSize) % width - width / 2.f) + j % workGroupSize, -(((i * workGroupSize) / width) * workGroupSize - height / 2.f) - (j / workGroupSize), i * workGroupSquare + j); #if doTimer auto timeBeforeRay = getTime(); #endif nanoSec timeTriangleTest(0); FastRay ray(positions[cameraId], getRayTargetPosition(info, cameraId)); uint32_t leafIndex = 0; uint8_t triIndex = 0; //shoot primary ray. bool result; if (saveDistance) { result = nodeManager.intersectSaveDistance<doCache>(ray, leafIndex, triIndex, timeTriangleTest); } else { result = nodeManager.intersect<doCache>(ray, leafIndex, triIndex, timeTriangleTest); } if constexpr (doCache) { #if perRayCacheAnalysis cachePrimaryResult.updateAndResetCounterThread(nodeManager.cache, j); #else cachePrimaryResult.updateAndResetCounterThread(nodeManager.cache, i); #endif } if (result) { //prepare secondary rays (traversal is done after the primary rays) //get surface normal and position from triangle index info. glm::vec3 surfaceNormal(0); glm::vec3 surfacePosition(0); nodeManager.getSurfaceNormalPosition(ray, surfaceNormal, surfacePosition, leafIndex, triIndex); ray.pos = surfacePosition + surfaceNormal * 0.001f; //nodeManager.getSurfaceNormalTri(ray, surfaceNormal); secondaryRayData[j] = std::make_pair(ray, surfaceNormal); } else { secondaryRayData[j] = std::make_pair(ray, glm::vec3(NAN)); } //important to overwrite previous data since we do more than one run.
timesTriangles[info.index] = timeTriangleTest; #if doTimer timesRay[info.index] = getTimeSpan(timeBeforeRay); #endif } //now secondary ray: for (int j = 0; j < workGroupSquare; j++) { FastRay ray = secondaryRayData[j].first; glm::vec3 surfaceNormal = secondaryRayData[j].second; nanoSec timeTriangleTest(0); #if doTimer auto timeBeforeRay = getTime(); #endif int index = i * workGroupSquare + j; uint8_t imageResult = 0; uint16_t ambientResult = 0; if (!isnan(surfaceNormal.x)) { for (size_t i = 0; i < ambientSampleCount; i++) { //deterministic random direction auto direction = getAmbientDirection(index, surfaceNormal, i); ray.updateDirection(direction); ray.tMax = ambientDistance; //shoot secondary ray if (nodeManager.intersectSecondary<doCache>(ray, timeTriangleTest)) { ambientResult++; } } float factor = 1 - (ambientResult / (float)ambientSampleCount); imageResult = (uint8_t)(factor * 255); } //add time results to primary ray times: timesTriangles[index] += timeTriangleTest; #if doTimer timesRay[index] += getTimeSpan(timeBeforeRay); #endif //distance render version (for large scenes) //float distanceToCamera = glm::distance(ray.surfacePosition, ray.pos); //imageResult = (uint8_t)(distanceToCamera / 50.f); image[index * 4 + 0] = imageResult; image[index * 4 + 1] = imageResult; image[index * 4 + 2] = imageResult; image[index * 4 + 3] = 255; //render ambient sum average: //ambientSum = glm::normalize(ambientSum); //image[info.index * 4 + 0] = (uint8_t)(ambientSum.x * 127 + 127); //image[info.index * 4 + 1] = (uint8_t)(ambientSum.y * 127 + 127); //image[info.index * 4 + 2] = (uint8_t)(ambientSum.z * 127 + 127); //surface normal render version: //image[info.index * 4 + 0] = (uint8_t)(ray.surfaceNormal.x * 127 + 127); //image[info.index * 4 + 1] = (uint8_t)(ray.surfaceNormal.y * 127 + 127); //image[info.index * 4 + 2] = (uint8_t)(ray.surfaceNormal.z * 127 + 127); if constexpr (doCache) { #if perRayCacheAnalysis cacheSecondaryResult.updateAndResetCounterThread(nodeManager.cache, j); #else cacheSecondaryResult.updateAndResetCounterThread(nodeManager.cache, i); #endif } } } } else if (!wideRefill) { //ultra wide version #pragma omp parallel for schedule(dynamic, 4) for (int i = 0; i < (width / workGroupSize) * (height / workGroupSize); i++) { #if doTimer auto timeBeforeRay = getTime(); #endif //prepare primary ray std::array<FastRay, workGroupSquare> rays; //initialization doesn't matter since we get hit result from triIndex std::array<uint32_t, workGroupSquare> leafIndex; //leafIndex.fill(0); //hit is encoded in triIndex (when it's not -1) std::array<int8_t, workGroupSquare> triIndex; triIndex.fill(-1); nanoSec timeTriangleTest(0); int tmp0 = ((i * workGroupSize) / width) * workGroupSize - height / 2; int tmp1 = ((i * workGroupSize) % width - width / 2); for (int j = 0; j < workGroupSquare; j++) { glm::vec3 targetPos = getRayTargetPosition( (-tmp0 - (j / workGroupSize)), tmp1 + j % workGroupSize, cameraId); rays[j] = FastRay(positions[cameraId], targetPos); } //shoot primary ray if (wideAlternative) { nodeManager.intersectWideAlternative<doCache>(rays, leafIndex, triIndex, timeTriangleTest); } else { nodeManager.intersectWide<doCache>(rays, leafIndex, triIndex, timeTriangleTest); } #if !perRayCacheAnalysis if constexpr (doCache) { cachePrimaryResult.updateAndResetCounterThread(nodeManager.cache, i); } #endif //prepare secondary rays: std::array<uint16_t, workGroupSquare> ambientResult; ambientResult.fill(0); std::array<glm::vec3, workGroupSquare> surfaceNormal; //surfaceNormal.fill(glm::vec3(0)); for
(int j = 0; j < workGroupSquare; j++) { if (triIndex[j] != -1) { //get surface normal and position from triangle index info. glm::vec3 surfacePosition(0); nodeManager.getSurfaceNormalPosition(rays[j], surfaceNormal[j], surfacePosition, leafIndex[j], triIndex[j]); //this way of "fixing" the position allows us to keep it for different ambient rays rays[j].pos = surfacePosition + surfaceNormal[j] * 0.001f; } else { //no hit for primary ray, doesn't happen that often. rays[j].pos = glm::vec3(NAN); surfaceNormal[j] = glm::vec3(1.f); } } for (int a = 0; a < ambientSampleCount; a++) { //final preparation of secondary rays for (int j = 0; j < workGroupSquare; j++) { int index = i * workGroupSquare + j; //get deterministic random direction from ray index auto direction = getAmbientDirection(index, surfaceNormal[j], a); rays[j].updateDirection(direction); rays[j].tMax = ambientDistance; } //shoot secondary ray: if (wideAlternative) { nodeManager.intersectSecondaryWideAlternative<doCache>(rays, ambientResult, timeTriangleTest); } else { nodeManager.intersectSecondaryWide<doCache>(rays, ambientResult, timeTriangleTest); } } for (int j = 0; j < workGroupSquare; j++) { int index = i * workGroupSquare + j; float factor = 1 - (ambientResult[j] / (float)ambientSampleCount); //factor = (factor + 1) / 2.f; uint8_t imageResult = (uint8_t)(factor * 255); //imageResult = i; image[index * 4 + 0] = imageResult; image[index * 4 + 1] = imageResult; image[index * 4 + 2] = imageResult; image[index * 4 + 3] = 255; } //time for rays: #if doTimer timesRay[i] = getTimeSpan(timeBeforeRay); #endif timesTriangles[i] = timeTriangleTest; #if !perRayCacheAnalysis if constexpr (doCache) { cacheSecondaryResult.updateAndResetCounterThread(nodeManager.cache, i); } #endif } } else { //REFILL (WORK in progress) //first "lazy" version, work for each thread is predetermined by threadId and max threadCount int maxThreadCount = omp_get_max_threads(); //not sure if I use OpenMP correctly; it might be possible that it doesn't spawn the maximum number of threads //in debug it also only uses thread 0... #pragma omp parallel { //TODO: remove debug prints int tId = omp_get_thread_num(); //<- this implementation doesn't work in debug because tId is always 0 int threadSum = (width / workGroupSize) * (height / workGroupSize); int beginId = threadSum / maxThreadCount * tId; int endId = threadSum / maxThreadCount * (tId + 1); if (tId == maxThreadCount - 1) { endId = threadSum; } int currentId = beginId; RefillStructure<nodeMemory, workGroupSize> dataStruct; int tmp0 = ((currentId * workGroupSize) / width) * workGroupSize - height / 2; int tmp1 = ((currentId * workGroupSize) % width - width / 2); for (int j = 0; j < workGroupSquare; j++) { glm::vec3 targetPos = getRayTargetPosition( (-tmp0 - (j / workGroupSize)), (tmp1 + (j % workGroupSize)), cameraId); dataStruct.rays[j] = FastRay(positions[cameraId], targetPos); dataStruct.rayId[j] = currentId * workGroupSquare + j; } dataStruct.fillRayMajorAxis(); currentId++; //half tells if we already have loaded half of the current block uint8_t half = 0; bool lastRun = false; //primary rays: while (true) { //structure: //call traversal, traversal stops when it reaches 50% utilization -> we refill here and start next traversal //for refill we need to save the result of finished rays and then fill in the new rays. //could also reorder unfinished rays but I don't think that is necessary //in order to store results I need to keep track of the ray id.
(will do this with a separate array) if (currentId >= endId && !half) { lastRun = true; } nodeManager.intersectRefillWideAlternative<doCache>(dataStruct, lastRun); //refill dataStruct int tmp0 = ((currentId * workGroupSize) / width) * workGroupSize - height / 2; int tmp1 = ((currentId * workGroupSize) % width - width / 2); int fillCount = 0; for (int j = 0; j < workGroupSquare; j++) { if (isnan(dataStruct.rays[j].tMax)) { //check if not already written if (image[dataStruct.rayId[j] * 4 + 0] != 0) { std::cerr << "why" << std::endl; } //read hit info if (dataStruct.triIndex[j] >= 0) { image[dataStruct.rayId[j] * 4 + 0] = 255 * (currentId / (float)endId); image[dataStruct.rayId[j] * 4 + 1] = 255 * (tId / (float)maxThreadCount); image[dataStruct.rayId[j] * 4 + 2] = 255; image[dataStruct.rayId[j] * 4 + 3] = 255; dataStruct.triIndex[j] = -1; } else { image[dataStruct.rayId[j] * 4 + 0] = 0; image[dataStruct.rayId[j] * 4 + 1] = 0; image[dataStruct.rayId[j] * 4 + 2] = 0; image[dataStruct.rayId[j] * 4 + 3] = 0; dataStruct.triIndex[j] = -1; } if (!lastRun) { //reset ray: int rayId = fillCount + half * (workGroupSquare / 2); glm::vec3 targetPos = getRayTargetPosition( (-tmp0 - (rayId / workGroupSize)), (tmp1 + (rayId % workGroupSize)), cameraId); dataStruct.rays[j] = FastRay(positions[cameraId], targetPos); dataStruct.rayId[j] = currentId * workGroupSquare + rayId; dataStruct.stack[0][j] = 0; dataStruct.stackIndex[j] = 1; (*dataStruct.currentWork)[dataStruct.nodeRays++] = j; fillCount++; if (fillCount == workGroupSquare / 2) { break; } } } } if (lastRun) { break; } //TODO: this recomputes the axis for all. dataStruct.fillRayMajorAxis(); if (fillCount != workGroupSquare / 2) { std::cerr << "was not able to refill the correct amount of rays" << std::endl; } currentId += half; half = (half + 1) % 2; } if (dataStruct.nodeRays != 0) { std::cerr << "didn't finish all rays" << std::endl; } //secondary rays: //while (true) //{ // //} } } nanoSec totalTime = getTimeSpan(timeBeginRaytracer); nanoSec timeRaySum = std::accumulate(timesRay.begin(), timesRay.end(), nanoSec(0)); nanoSec timeTrianglesSum = std::accumulate(timesTriangles.begin(), timesTriangles.end(), nanoSec(0)); std::string workGroupName = "/WorkGroupSize_" + std::to_string(workGroupSize) + "_Normal"; if (wideRender) { if (wideAlternative == 0) { workGroupName = "/WorkGroupSize_" + std::to_string(workGroupSize) + "_WideOld"; } else { workGroupName = "/WorkGroupSize_" + std::to_string(workGroupSize) + "_Wide"; } } #if perRayCacheAnalysis //per ray analysis: if constexpr (doCache) { std::cerr << "doing per-ray cache miss analysis" << std::endl; if (!wideRender) { std::string filenameCachePrimary = path + "/WorkGroupSize_" + std::to_string(workGroupSize) + "_Normal/" + name + "_PerRayCache_Cache" + std::to_string(nodeManager.cache.cacheSize) + problem + problemPrefix + ".txt"; std::ofstream file(filenameCachePrimary); if (file.is_open()) { file << "rayId, stackCacheLoads, stackCacheMiss, heapCacheLoads, heapCacheMiss, " << "secondaryStackCacheLoads, secondaryStackCacheMiss, secondaryHeapCacheLoads, secondaryHeapCacheMiss" << std::endl; int threadCount = omp_get_max_threads(); int workGroupCount = (width * height) / workGroupSquare; for (int j = 0; j < workGroupSquare; j++) { file << j << ", " << cachePrimaryResult.outputLinePerRay(j, threadCount, workGroupCount) << ", " << cacheSecondaryResult.outputLinePerRay(j, threadCount, workGroupCount) << std::endl; } } } } #else //normal cache analysis.
(per workgroup) if constexpr (doCache) { std::string filenamePrimary = path + workGroupName + "/" + name + "_PerWorkgroupCache_Cache" + std::to_string(nodeManager.cache.cacheSize) + problem + problemPrefix + ".txt"; std::ofstream file(filenamePrimary); if (file.is_open()) { file << "workGroupId, stackCacheLoads, stackCacheMiss, heapCacheLoads, heapCacheMiss, " << "secondaryStackCacheLoads, secondaryStackCacheMiss, secondaryHeapCacheLoads, secondaryHeapCacheMiss" << std::endl; //go through all workgroups and write cache loads and hits for (int i = 0; i < (width * height) / workGroupSquare; i++) { file << i << ", " << cachePrimaryResult.outputLinePerWorkGroup(i) << ", " << cacheSecondaryResult.outputLinePerWorkGroup(i) << std::endl; } } } #endif //for detailed timing analysis, save times of each ray. if (saveRayTimes) { if (wideRender) { std::string timingFileName = path + workGroupName + "/" + name + "_RayPerformanceWideV" + std::to_string(wideAlternative) + "_c" + std::to_string(cameraId) + ".txt"; std::ofstream file(timingFileName); if (file.is_open()) { file << "totalTime, nodeTime, leafTime" << std::endl; for (int i = 0; i < (height / nonTemplateWorkGroupSize) * (width / nonTemplateWorkGroupSize); i++) { file << timesRay[i].count() / (float)workGroupSquare << ", "; file << (timesRay[i] - timesTriangles[i]).count() / (float)workGroupSquare << ", "; file << timesTriangles[i].count() / (float)workGroupSquare << std::endl; } } else { std::cerr << "unable to open ray performance output file" << std::endl; } } else { std::string timingFileName = path + workGroupName + "/" + name + "_RayPerformance_c" + std::to_string(cameraId) + ".txt"; std::ofstream file(timingFileName); if (file.is_open()) { file << "totalTime, nodeTime, leafTime" << std::endl; for (int i = 0; i < width * height; i++) { file << timesRay[i].count() << ", "; file << (timesRay[i] - timesTriangles[i]).count() << ", "; file << timesTriangles[i].count() << std::endl; } } else { std::cerr << "unable to open ray performance output file" << std::endl; } } } if (saveImage) { //reorder image before we save it. std::vector<uint8_t> imageCorrect; imageCorrect.resize(height * width * 4); for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { //one line of smallsize int id = (w / workGroupSize) * workGroupSquare; id += w % workGroupSize; id += (h % workGroupSize) * workGroupSize; id += (h / workGroupSize) * width * workGroupSize; int idOrig = w + h * width; imageCorrect[idOrig * 4 + 0] = image[id * 4 + 0]; imageCorrect[idOrig * 4 + 1] = image[id * 4 + 1]; imageCorrect[idOrig * 4 + 2] = image[id * 4 + 2]; imageCorrect[idOrig * 4 + 3] = image[id * 4 + 3]; } } encodeTwoSteps(path + "/" + name + "_c" + std::to_string(cameraId) + "_Perf.png", imageCorrect, width, height); } return std::make_tuple(getTimeFloat(totalTime), getTimeFloat(timeRaySum), getTimeFloat(timeTrianglesSum)); } };
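/*
 * Hedged illustrative sketch (not from the original header): renderImage
 * stores pixels in workgroup-tile order, and the save path above remaps
 * them to row-major order. The standalone helper below (hypothetical name
 * tiledIndex) reproduces that index arithmetic for a single pixel (w, h).
 */
static inline int tiledIndex(int w, int h, int width, int workGroupSize)
{
	const int workGroupSquare = workGroupSize * workGroupSize;
	int id = (w / workGroupSize) * workGroupSquare;    // start of the tile column
	id += w % workGroupSize;                           // x offset inside the tile
	id += (h % workGroupSize) * workGroupSize;         // y offset inside the tile
	id += (h / workGroupSize) * width * workGroupSize; // start of the tile row
	return id;
}
// usage (mirrors the reorder loop above):
// imageCorrect[(w + h * width) * 4 + c] = image[tiledIndex(w, h, width, workGroupSize) * 4 + c];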
PAddOP.h
#ifndef PAddOP #define PAddOP /* * PAddOP.h: * (pointwise) add * * Created on: June 13, 2017 * Author: mszhang */ #include "Eigen/Dense" #include "MyLib.h" #include "Node.h" #include "Graph.h" class PAddNode : public Node { public: vector<PNode> ins; ~PAddNode() { ins.clear(); } public: PAddNode() : Node() { ins.clear(); node_type = "point-add"; } inline void clearValue() { ins.clear(); Node::clearValue(); } public: void forward(Graph *cg, const vector<PNode>& x) { if (x.size() == 0) { std::cout << "empty inputs for add" << std::endl; return; } ins.clear(); for (int i = 0; i < x.size(); i++) { if (x[i]->val.dim == dim) { ins.push_back(x[i]); } else { std::cout << "dim does not match" << std::endl; } } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } if (x5->dim == dim) { ins.push_back(x5); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5, PNode x6) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << 
std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } if (x5->dim == dim) { ins.push_back(x5); } else { std::cout << "dim does not match" << std::endl; } if (x6->dim == dim) { ins.push_back(x6); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } public: inline void compute() { int nSize = ins.size(); val.zero(); for (int i = 0; i < nSize; ++i) { for (int idx = 0; idx < dim; idx++) { val[idx] += ins[i]->val[idx]; } } } void backward() { int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { for (int idx = 0; idx < dim; idx++) { ins[i]->loss[idx] += loss[idx]; } } } public: inline PExecute generate(bool bTrain, dtype cur_drop_factor); // better to rewrite for deep understanding bool typeEqual(PNode other) { if (!Node::typeEqual(other)) { return false; } PAddNode *add = static_cast<PAddNode*>(other); return ins.size() == add->ins.size(); } size_t typeHashCode() const override { return (std::hash<int>{}(ins.size()) << 1) ^ Node::typeHashCode(); } }; class PAddExecute : public Execute { public: int in_count; int dim; Tensor2D drop_mask; public: Tensor1D x, y; int sumDim; bool bTrain; #if USE_GPU void forward() { int count = batch.size(); drop_mask.init(dim, count); CalculateDropMask(count, dim, drop_mask); std::vector<std::vector<dtype*>> in_vals; in_vals.reserve(in_count); for (int i = 0; i < in_count; ++i) { std::vector<dtype*> ins; ins.reserve(count); for (PNode n : batch) { PAddNode *padd = static_cast<PAddNode*>(n); ins.push_back(padd->ins.at(i)->val.value); } in_vals.push_back(ins); } std::vector<dtype *> outs; outs.reserve(count); for (PNode n : batch) { PAddNode *padd = static_cast<PAddNode*>(n); outs.push_back(padd->val.value); } n3ldg_cuda::PAddForward(in_vals, count, dim, in_count, drop_mask.value, drop_factor, outs); #if TEST_CUDA drop_mask.copyFromDeviceToHost(); for (int i = 0; i < count; ++i) { for (int j = 0; j < dim; ++j) { dtype v = drop_mask[j][i]; batch[i]->drop_mask[j] = v <= drop_factor ? 
0 : 1; } } for (int idx = 0; idx < count; idx++) { batch[idx]->compute(); batch[idx]->forward_drop(bTrain, drop_factor); } for (Node *n : batch) { n3ldg_cuda::Assert(n->val.verify("PAdd forward")); } #endif } #else void forward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->compute(); batch[idx]->forward_drop(bTrain, drop_factor); } } #endif #if USE_GPU void backward() { int count = batch.size(); std::vector<std::vector<dtype*>> in_losses; in_losses.reserve(in_count); for (int i = 0; i < in_count; ++i) { std::vector<dtype*> ins; ins.reserve(count); for (PNode n : batch) { PAddNode *padd = static_cast<PAddNode*>(n); ins.push_back(padd->ins.at(i)->loss.value); } in_losses.push_back(ins); } std::vector<dtype *> out_losses; out_losses.reserve(count); for (PNode n : batch) { PAddNode *padd = static_cast<PAddNode*>(n); out_losses.push_back(padd->loss.value); } n3ldg_cuda::PAddBackward(out_losses, count, dim, in_count, drop_mask.value, drop_factor, in_losses); #if TEST_CUDA for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); batch[idx]->backward(); } for (Node *n : batch) { PAddNode *add = static_cast<PAddNode*>(n); for (Node *in : add->ins) { n3ldg_cuda::Assert(in->loss.verify("PAddExecute backward")); } } #endif } #else void backward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); batch[idx]->backward(); } } #endif }; inline PExecute PAddNode::generate(bool bTrain, dtype cur_drop_factor) { PAddExecute* exec = new PAddExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; exec->drop_factor = cur_drop_factor * drop_value; exec->in_count = ins.size(); exec->dim = dim; return exec; } #endif
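/*
 * Hedged illustrative sketch (not part of the original header; it would
 * belong inside the include guard above): the six fixed-arity forward()
 * overloads all follow the same pattern, so a small variadic helper can
 * forward any number of nodes through the existing vector overload. The
 * helper name paddForward is hypothetical.
 */
template <typename... Nodes>
inline void paddForward(Graph *cg, PAddNode *node, Nodes... xs) {
    std::vector<PNode> inputs = {xs...}; // collect the inputs
    node->forward(cg, inputs);           // reuse the vector overload
}
// usage: paddForward(cg, &addNode, x1, x2, x3);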
SectionsBeginLink.c
int x; int main() { #pragma omp sections { #pragma omp section { 111; } } #pragma omp sections { #pragma omp section { int x; } #pragma omp section { 111; } } #pragma omp sections { } }
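/*
 * Hedged illustrative sketch (not part of the original test case): each
 * `section` inside an `omp sections` construct is executed exactly once,
 * by some thread of the team, with an implicit barrier at the end of the
 * construct. The hypothetical demo below makes the scheduling visible.
 */
#include <stdio.h>
#include <omp.h>
int sectionsDemo() {
	#pragma omp parallel sections
	{
		#pragma omp section
		{ printf("section A on thread %d\n", omp_get_thread_num()); }
		#pragma omp section
		{ printf("section B on thread %d\n", omp_get_thread_num()); }
	}
	return 0;
}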
convolution_sgemm_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack8_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const __fp16* bias = _bias; // permute Mat tmp; if (size >= 12) tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 16u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 16u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 16u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 16u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 8, opt.workspace_allocator); { int nn_size = size / 12; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 12; __fp16* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 12x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h \n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); img0 += size * 8; } } } remain_size_start += nn_size * 12; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // 
transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += size * 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size * 8; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); img0 += size * 8; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size * 8; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 8 : zeros; int i = 0; for (; i + 11 < size; i += 12) { const __fp16* tmpptr = tmp.channel(i / 12); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v20.8h}, [%8] \n" "mov v21.16b, v20.16b \n" "mov v22.16b, v20.16b \n" "mov v23.16b, v20.16b \n" "mov v24.16b, v20.16b \n" "mov v25.16b, v20.16b \n" "mov v26.16b, v20.16b \n" "mov v27.16b, v20.16b \n" "mov v28.16b, v20.16b \n" "mov v29.16b, v20.16b \n" "mov v30.16b, v20.16b \n" "mov v31.16b, v20.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123 "fmla v20.8h, v12.8h, v0.h[0] \n" "fmla v21.8h, v12.8h, v0.h[1] \n" "fmla v22.8h, v12.8h, v0.h[2] \n" "fmla v23.8h, v12.8h, v0.h[3] \n" "fmla v24.8h, v12.8h, v0.h[4] \n" "fmla v25.8h, v12.8h, v0.h[5] \n" "fmla v26.8h, v12.8h, v0.h[6] \n" "fmla v27.8h, v12.8h, v0.h[7] \n" "fmla v28.8h, v12.8h, v1.h[0] \n" "fmla v29.8h, v12.8h, v1.h[1] \n" "fmla v30.8h, v12.8h, v1.h[2] \n" "fmla v31.8h, v12.8h, v1.h[3] \n" "fmla v20.8h, v13.8h, v1.h[4] \n" "fmla v21.8h, v13.8h, v1.h[5] \n" "fmla v22.8h, v13.8h, v1.h[6] \n" "fmla v23.8h, v13.8h, v1.h[7] \n" "fmla v24.8h, v13.8h, v2.h[0] \n" "fmla v25.8h, v13.8h, v2.h[1] \n" "fmla v26.8h, v13.8h, v2.h[2] \n" "fmla v27.8h, v13.8h, v2.h[3] \n" "fmla v28.8h, v13.8h, v2.h[4] \n" "fmla v29.8h, v13.8h, v2.h[5] \n" "fmla v30.8h, v13.8h, v2.h[6] \n" "fmla v31.8h, v13.8h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v20.8h, v14.8h, v3.h[0] \n" "fmla v21.8h, v14.8h, v3.h[1] \n" "fmla v22.8h, v14.8h, v3.h[2] \n" "fmla v23.8h, v14.8h, v3.h[3] \n" "fmla v24.8h, v14.8h, v3.h[4] \n" "fmla v25.8h, v14.8h, v3.h[5] \n" "fmla v26.8h, v14.8h, v3.h[6] \n" "fmla v27.8h, v14.8h, v3.h[7] \n" "fmla v28.8h, v14.8h, v4.h[0] \n" "fmla v29.8h, v14.8h, v4.h[1] \n" "fmla v30.8h, v14.8h, v4.h[2] \n" "fmla v31.8h, v14.8h, v4.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567 "fmla v20.8h, v15.8h, v4.h[4] \n" "fmla v21.8h, v15.8h, v4.h[5] \n" "fmla v22.8h, v15.8h, v4.h[6] \n" "fmla v23.8h, v15.8h, v4.h[7] \n" "fmla v24.8h, v15.8h, v5.h[0] \n" "fmla v25.8h, v15.8h, v5.h[1] \n" "fmla v26.8h, v15.8h, v5.h[2] \n" "fmla v27.8h, v15.8h, v5.h[3] \n" "fmla v28.8h, v15.8h, v5.h[4] \n" "fmla v29.8h, v15.8h, v5.h[5] \n" "fmla v30.8h, v15.8h, v5.h[6] \n" "fmla v31.8h, v15.8h, v5.h[7] \n" "fmla v20.8h, v16.8h, v6.h[0] \n" "fmla v21.8h, v16.8h, v6.h[1] \n" "fmla v22.8h, v16.8h, v6.h[2] \n" "fmla v23.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v16.8h, v6.h[4] \n" "fmla v25.8h, v16.8h, v6.h[5] \n" "fmla v26.8h, v16.8h, v6.h[6] \n" "fmla v27.8h, v16.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v7.h[0] \n" "fmla v29.8h, v16.8h, v7.h[1] \n" "fmla v30.8h, v16.8h, v7.h[2] \n" "fmla v31.8h, v16.8h, v7.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011 "fmla v20.8h, v17.8h, v7.h[4] \n" "fmla v21.8h, v17.8h, v7.h[5] \n" "fmla v22.8h, v17.8h, v7.h[6] \n" "fmla v23.8h, v17.8h, v7.h[7] \n" "fmla v24.8h, v17.8h, v8.h[0] \n" "fmla v25.8h, v17.8h, v8.h[1] \n" "fmla v26.8h, v17.8h, v8.h[2] \n" "fmla v27.8h, v17.8h, v8.h[3] \n" "fmla v28.8h, v17.8h, v8.h[4] \n" "fmla v29.8h, v17.8h, v8.h[5] \n" "fmla v30.8h, v17.8h, v8.h[6] \n" "fmla v31.8h, v17.8h, v8.h[7] \n" "fmla v20.8h, v18.8h, v9.h[0] \n" "fmla v21.8h, v18.8h, v9.h[1] \n" "fmla v22.8h, v18.8h, v9.h[2] \n" "fmla v23.8h, 
v18.8h, v9.h[3] \n" "fmla v24.8h, v18.8h, v9.h[4] \n" "fmla v25.8h, v18.8h, v9.h[5] \n" "fmla v26.8h, v18.8h, v9.h[6] \n" "fmla v27.8h, v18.8h, v9.h[7] \n" "fmla v28.8h, v18.8h, v10.h[0] \n" "fmla v29.8h, v18.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v10.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.8h, v19.8h, v10.h[4] \n" "fmla v21.8h, v19.8h, v10.h[5] \n" "fmla v22.8h, v19.8h, v10.h[6] \n" "fmla v23.8h, v19.8h, v10.h[7] \n" "fmla v24.8h, v19.8h, v11.h[0] \n" "fmla v25.8h, v19.8h, v11.h[1] \n" "fmla v26.8h, v19.8h, v11.h[2] \n" "fmla v27.8h, v19.8h, v11.h[3] \n" "fmla v28.8h, v19.8h, v11.h[4] \n" "fmla v29.8h, v19.8h, v11.h[5] \n" "fmla v30.8h, v19.8h, v11.h[6] \n" "fmla v31.8h, v19.8h, v11.h[7] \n" "bne 0b \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "mov v20.16b, v16.16b \n" "mov v21.16b, v16.16b \n" "mov v22.16b, v16.16b \n" "mov v23.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v0.h[1] \n" "fmla v18.8h, v8.8h, v0.h[2] \n" "fmla v19.8h, v8.8h, v0.h[3] \n" "fmla v20.8h, v8.8h, v0.h[4] \n" "fmla v21.8h, v8.8h, v0.h[5] \n" "fmla v22.8h, v8.8h, v0.h[6] \n" "fmla v23.8h, v8.8h, v0.h[7] \n" "fmla v16.8h, v9.8h, v1.h[0] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v1.h[2] \n" "fmla v19.8h, v9.8h, v1.h[3] \n" "fmla v20.8h, v9.8h, v1.h[4] \n" "fmla v21.8h, v9.8h, v1.h[5] \n" "fmla v22.8h, v9.8h, v1.h[6] \n" "fmla v23.8h, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v16.8h, v10.8h, v2.h[0] \n" "fmla v17.8h, v10.8h, v2.h[1] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v2.h[3] \n" "fmla v20.8h, v10.8h, v2.h[4] \n" "fmla v21.8h, v10.8h, v2.h[5] \n" "fmla v22.8h, v10.8h, v2.h[6] \n" "fmla v23.8h, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v11.8h, v3.h[0] \n" "fmla v17.8h, v11.8h, v3.h[1] \n" "fmla v18.8h, v11.8h, v3.h[2] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v20.8h, v11.8h, v3.h[4] \n" "fmla v21.8h, v11.8h, v3.h[5] \n" "fmla v22.8h, v11.8h, v3.h[6] \n" "fmla v23.8h, v11.8h, v3.h[7] \n" "fmla v16.8h, v12.8h, v4.h[0] \n" "fmla v17.8h, v12.8h, v4.h[1] \n" "fmla v18.8h, v12.8h, v4.h[2] \n" "fmla v19.8h, v12.8h, v4.h[3] \n" "fmla v20.8h, v12.8h, v4.h[4] \n" "fmla v21.8h, v12.8h, v4.h[5] \n" "fmla v22.8h, v12.8h, v4.h[6] \n" "fmla v23.8h, v12.8h, v4.h[7] \n" "fmla v16.8h, v13.8h, v5.h[0] \n" "fmla v17.8h, v13.8h, v5.h[1] \n" "fmla v18.8h, v13.8h, v5.h[2] \n" "fmla v19.8h, v13.8h, v5.h[3] \n" "fmla v20.8h, 
v13.8h, v5.h[4] \n" "fmla v21.8h, v13.8h, v5.h[5] \n" "fmla v22.8h, v13.8h, v5.h[6] \n" "fmla v23.8h, v13.8h, v5.h[7] \n" "fmla v16.8h, v14.8h, v6.h[0] \n" "fmla v17.8h, v14.8h, v6.h[1] \n" "fmla v18.8h, v14.8h, v6.h[2] \n" "fmla v19.8h, v14.8h, v6.h[3] \n" "fmla v20.8h, v14.8h, v6.h[4] \n" "fmla v21.8h, v14.8h, v6.h[5] \n" "fmla v22.8h, v14.8h, v6.h[6] \n" "fmla v23.8h, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v7.h[0] \n" "fmla v17.8h, v15.8h, v7.h[1] \n" "fmla v18.8h, v15.8h, v7.h[2] \n" "fmla v19.8h, v15.8h, v7.h[3] \n" "fmla v20.8h, v15.8h, v7.h[4] \n" "fmla v21.8h, v15.8h, v7.h[5] \n" "fmla v22.8h, v15.8h, v7.h[6] \n" "fmla v23.8h, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 3 < size; i += 4) { const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v18.8h, v8.8h, v2.h[0] \n" "fmla v19.8h, v8.8h, v3.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v2.h[1] \n" "fmla v19.8h, v9.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v3.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v18.8h, v11.8h, v2.h[3] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v18.8h, v12.8h, v2.h[4] \n" "fmla v19.8h, v12.8h, v3.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v18.8h, v13.8h, v2.h[5] \n" "fmla v19.8h, v13.8h, v3.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "fmla v18.8h, v14.8h, v2.h[6] \n" "fmla v19.8h, v14.8h, v3.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "fmla v18.8h, v15.8h, v2.h[7] \n" "fmla v19.8h, v15.8h, v3.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 1 < size; i += 2) { const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "prfm pldl1keep, [%3, #512] \n" 
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } for (; i < size; i++) { const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.8h}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "bne 0b \n" "st1 {v16.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); } } } static void convolution_im2col_sgemm_transform_kernel_pack8_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 8b-8a-maxk-inch/8a-outch/8b Mat kernel = _kernel.reshape(maxk, inch, outch); kernel_tm.create(64 * maxk, inch / 8, outch / 8, (size_t)2u); for (int q = 0; q + 7 < outch; q += 8) { __fp16* g00 = kernel_tm.channel(q / 8); for (int p = 0; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { const float* k00 = kernel.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } } static void convolution_im2col_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); __fp16* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v 
< kernel_w; v++) { const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 8; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _val0 = vld1q_f16(sptr); float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8); float16x8_t _val2 = vld1q_f16(sptr + stride_w * 16); float16x8_t _val3 = vld1q_f16(sptr + stride_w * 24); vst1q_f16(ptr, _val0); vst1q_f16(ptr + 8, _val1); vst1q_f16(ptr + 16, _val2); vst1q_f16(ptr + 24, _val3); sptr += stride_w * 32; ptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _val0 = vld1q_f16(sptr); float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8); vst1q_f16(ptr, _val0); vst1q_f16(ptr + 8, _val1); sptr += stride_w * 16; ptr += 16; } for (; j < outw; j++) { float16x8_t _val = vld1q_f16(sptr); vst1q_f16(ptr, _val); sptr += stride_w * 8; ptr += 8; } sptr += gap; } } } } } im2col_sgemm_pack8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt); }
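/*
 * Illustrative note (editorial sketch, not part of the original file): the
 * tmp.channel(...) expressions above map a column index i to the packed tile
 * that holds it. Columns are packed greedily into tiles of width 12, then 8,
 * 4, 2 and 1 for the remainder, one tile per channel, so the channel index is
 * the number of complete tiles preceding column i. A scalar re-derivation of
 * the same arithmetic, valid at the tile starting indices used by the loops:
 */
static int tile_channel_of(int i)
{
    int c = i / 12;       /* complete 12-wide tiles */
    int r = i % 12;
    c += r / 8; r %= 8;   /* at most one 8-wide tile */
    c += r / 4; r %= 4;   /* at most one 4-wide tile */
    c += r / 2; r %= 2;   /* at most one 2-wide tile */
    return c + r;         /* remaining single columns */
}
/* e.g. with size = 15: the 2-wide tile at i = 12 is channel 1 and the final
 * single column i = 14 is channel 2, matching the expressions above. */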
update_ops_matrix_dense_single.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif //void single_qubit_dense_matrix_gate_old_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void single_qubit_dense_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void single_qubit_dense_matrix_gate_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void single_qubit_dense_matrix_gate_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); void single_qubit_dense_matrix_gate(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { //single_qubit_dense_matrix_gate_old_single(target_qubit_index, matrix, state, dim); //single_qubit_dense_matrix_gate_old_parallel(target_qubit_index, matrix, state, dim); //single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim); //single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim); //single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim); //single_qubit_dense_matrix_gate_parallel_simd(target_qubit_index, matrix, state, dim); //return; #ifdef _USE_SIMD #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim); } else { single_qubit_dense_matrix_gate_parallel_simd(target_qubit_index, matrix, state, dim); } #else single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim); #endif #else #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { //single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim); single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim); } else { //single_qubit_dense_matrix_gate_parallel_unroll(target_qubit_index, matrix, state, dim); single_qubit_dense_matrix_gate_parallel(target_qubit_index, matrix, state, dim); } #else //single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim); single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim); #endif #endif } void single_qubit_dense_matrix_gate_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_1 = basis_0 + mask; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1; state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1; } } #ifdef _OPENMP void single_qubit_dense_matrix_gate_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_1 = basis_0 + mask; // fetch 
values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1; state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1; } } #endif void single_qubit_dense_matrix_gate_single_unroll(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; if (target_qubit_index == 0) { ITYPE basis = 0; for (basis = 0; basis < dim; basis+=2) { CTYPE val0a = state[basis]; CTYPE val1a = state[basis + 1]; CTYPE res0a = val0a * matrix[0] + val1a * matrix[1]; CTYPE res1a = val0a * matrix[2] + val1a * matrix[3]; state[basis] = res0a; state[basis + 1] = res1a; } } else { ITYPE state_index = 0; for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_1 = basis_0 + mask; CTYPE val0a = state[basis_0]; CTYPE val0b = state[basis_0 + 1]; CTYPE val1a = state[basis_1]; CTYPE val1b = state[basis_1 + 1]; CTYPE res0a = val0a * matrix[0] + val1a * matrix[1]; CTYPE res1b = val0b * matrix[2] + val1b * matrix[3]; CTYPE res1a = val0a * matrix[2] + val1a * matrix[3]; CTYPE res0b = val0b * matrix[0] + val1b * matrix[1]; state[basis_0] = res0a; state[basis_0 + 1] = res0b; state[basis_1] = res1a; state[basis_1 + 1] = res1b; } } } #ifdef _OPENMP void single_qubit_dense_matrix_gate_parallel_unroll(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; if (target_qubit_index == 0) { ITYPE basis = 0; #pragma omp parallel for for (basis = 0; basis < dim; basis += 2) { CTYPE val0a = state[basis]; CTYPE val1a = state[basis + 1]; CTYPE res0a = val0a * matrix[0] + val1a * matrix[1]; CTYPE res1a = val0a * matrix[2] + val1a * matrix[3]; state[basis] = res0a; state[basis + 1] = res1a; } } else { ITYPE state_index = 0; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_1 = basis_0 + mask; CTYPE val0a = state[basis_0]; CTYPE val0b = state[basis_0 + 1]; CTYPE val1a = state[basis_1]; CTYPE val1b = state[basis_1 + 1]; CTYPE res0a = val0a * matrix[0] + val1a * matrix[1]; CTYPE res1b = val0b * matrix[2] + val1b * matrix[3]; CTYPE res1a = val0a * matrix[2] + val1a * matrix[3]; CTYPE res0b = val0b * matrix[0] + val1b * matrix[1]; state[basis_0] = res0a; state[basis_0 + 1] = res0b; state[basis_1] = res1a; state[basis_1 + 1] = res1b; } } } #endif #ifdef _USE_SIMD void single_qubit_dense_matrix_gate_single_simd(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; if (target_qubit_index == 0) { ITYPE basis = 0; __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2])); for (basis = 0; basis < dim; basis += 2) { double* ptr 
= (double*)(state + basis); __m256d data = _mm256_loadu_pd(ptr); __m256d data_u0 = _mm256_mul_pd(data, mv00); __m256d data_u1 = _mm256_mul_pd(data, mv01); __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1); data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_d0 = _mm256_mul_pd(data, mv20); __m256d data_d1 = _mm256_mul_pd(data, mv21); __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1); data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_r = _mm256_hadd_pd(data_u2, data_d2); data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 _mm256_storeu_pd(ptr, data_r); } } else { ITYPE state_index = 0; __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0])); __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1])); __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2])); __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3])); __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3])); for (state_index = 0; state_index < loop_dim; state_index+=2) { ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_1 = basis_0 + mask; double* ptr0 = (double*)(state + basis_0); double* ptr1 = (double*)(state + basis_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); __m256d data_u2 = _mm256_mul_pd(data0, mv00); __m256d data_u3 = _mm256_mul_pd(data1, mv10); __m256d data_u4 = _mm256_mul_pd(data0, mv01); __m256d data_u5 = _mm256_mul_pd(data1, mv11); __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4); __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5); __m256d data_d2 = _mm256_mul_pd(data0, mv20); __m256d data_d3 = _mm256_mul_pd(data1, mv30); __m256d data_d4 = _mm256_mul_pd(data0, mv21); __m256d data_d5 = _mm256_mul_pd(data1, mv31); __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4); __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5); __m256d data_r0 = _mm256_add_pd(data_u6, data_u7); __m256d data_r1 = _mm256_add_pd(data_d6, data_d7); _mm256_storeu_pd(ptr0, data_r0); _mm256_storeu_pd(ptr1, data_r1); } } } #ifdef _OPENMP void single_qubit_dense_matrix_gate_parallel_simd(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; if (target_qubit_index == 0) { ITYPE basis = 0; __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2])); #pragma omp parallel for for (basis = 0; basis < dim; basis += 2) { double* ptr = (double*)(state + basis); __m256d data = 
_mm256_loadu_pd(ptr); __m256d data_u0 = _mm256_mul_pd(data, mv00); __m256d data_u1 = _mm256_mul_pd(data, mv01); __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1); data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_d0 = _mm256_mul_pd(data, mv20); __m256d data_d1 = _mm256_mul_pd(data, mv21); __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1); data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_r = _mm256_hadd_pd(data_u2, data_d2); data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 _mm256_storeu_pd(ptr, data_r); } } else { ITYPE state_index = 0; __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0])); __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1])); __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2])); __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3])); __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3])); #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_1 = basis_0 + mask; double* ptr0 = (double*)(state + basis_0); double* ptr1 = (double*)(state + basis_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); __m256d data_u2 = _mm256_mul_pd(data0, mv00); __m256d data_u3 = _mm256_mul_pd(data1, mv10); __m256d data_u4 = _mm256_mul_pd(data0, mv01); __m256d data_u5 = _mm256_mul_pd(data1, mv11); __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4); __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5); __m256d data_d2 = _mm256_mul_pd(data0, mv20); __m256d data_d3 = _mm256_mul_pd(data1, mv30); __m256d data_d4 = _mm256_mul_pd(data0, mv21); __m256d data_d5 = _mm256_mul_pd(data1, mv31); __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4); __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5); __m256d data_r0 = _mm256_add_pd(data_u6, data_u7); __m256d data_r1 = _mm256_add_pd(data_d6, data_d7); _mm256_storeu_pd(ptr0, data_r0); _mm256_storeu_pd(ptr1, data_r1); } } } #endif #endif /* void single_qubit_dense_matrix_gate_old_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // target mask const ITYPE target_mask = 1ULL << target_qubit_index; // loop variables const ITYPE loop_dim = dim / 2; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create index ITYPE basis_0 = insert_zero_to_basis_index(state_index, target_mask, target_qubit_index); // gather index ITYPE basis_1 = basis_0 ^ target_mask; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1; state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1; } } #ifdef _OPENMP void single_qubit_dense_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // target mask const ITYPE target_mask = 1ULL << 
target_qubit_index; // loop variables const ITYPE loop_dim = dim / 2; ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create index ITYPE basis_0 = insert_zero_to_basis_index(state_index, target_mask, target_qubit_index); // gather index ITYPE basis_1 = basis_0 ^ target_mask; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1; state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1; } } #endif */
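/*
 * Illustrative sketch (not part of the original file): the index arithmetic
 * used throughout the gates above,
 *   basis_0 = (state_index & mask_low) + ((state_index & mask_high) << 1),
 * inserts a zero bit at position target_qubit_index, and basis_1 = basis_0 + mask
 * sets that bit. A minimal standalone demonstration:
 */
#include <stdint.h>
static uint64_t insert_zero_bit(uint64_t index, unsigned target)
{
    const uint64_t mask = 1ULL << target;
    const uint64_t mask_low = mask - 1;    /* bits below target */
    const uint64_t mask_high = ~mask_low;  /* bits at/above target, shifted up */
    return (index & mask_low) + ((index & mask_high) << 1);
}
/* e.g. insert_zero_bit(5, 1) == 9: binary 101 -> 1001, a zero lands in bit 1,
 * so basis_0 and basis_0 + mask enumerate the |0> / |1> pair of the target. */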
simd.h
#pragma once #if (__x86_64__ || __i386__) #include <cpuid.h> #include <x86intrin.h> #endif #if defined(__AVX512__) or defined(__AVX256__) #define ROUND_DOWN(size, step) ((size) & ~((step)-1)) #define TILE (128 * 1024 * 1024) #if defined(__AVX512__) #define SIMD_STORE(a, d) _mm512_storeu_ps(a, d) #define SIMD_LOAD(x) _mm512_loadu_ps(x) #define SIMD_SET(x) _mm512_set1_ps(x) #define SIMD_ADD(x, y) _mm512_add_ps(x, y) #define SIMD_MUL(x, y) _mm512_mul_ps(x, y) #define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c) #define SIMD_SQRT(x) _mm512_sqrt_ps(x) #define SIMD_DIV(x, y) _mm512_div_ps(x, y) #define SIMD_WIDTH 16 #define SIMD_LOAD2(x, h) \ ((h) ? _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*)x)) : _mm512_loadu_ps(x)) #define SIMD_STORE2(x, d, h) \ ((h) ? _mm256_store_ps(x, _mm256_castsi256_ps(_mm512_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ : _mm512_storeu_ps(x, d)) #define INTV __m256i #elif defined(__AVX256__) #define SIMD_STORE(a, d) _mm256_storeu_ps(a, d) #define SIMD_LOAD(x) _mm256_loadu_ps(x) #define SIMD_SET(x) _mm256_set1_ps(x) #define SIMD_ADD(x, y) _mm256_add_ps(x, y) #define SIMD_MUL(x, y) _mm256_mul_ps(x, y) #define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c) #define SIMD_SQRT(x) _mm256_sqrt_ps(x) #define SIMD_DIV(x, y) _mm256_div_ps(x, y) #define SIMD_WIDTH 8 #define SIMD_LOAD2(x, h) \ ((h) ? _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)x)) : _mm256_loadu_ps(x)) #define SIMD_STORE2(x, d, h) \ ((h) ? _mm_store_ps(x, _mm_castsi128_ps(_mm256_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ : _mm256_storeu_ps(x, d)) #define INTV __m128i #endif union AVX_Data { #if defined(__AVX512__) __m512 data; #elif defined(__AVX256__) __m256 data; #endif // float data_f[16]; }; template <int span> inline void simd_store(float* dst, AVX_Data* src, bool half_precision) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { SIMD_STORE2(dst + SIMD_WIDTH * i, src[i].data, half_precision); } } template <int span> inline void simd_load(AVX_Data* dst, float* src, bool half_precision) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_LOAD2(src + SIMD_WIDTH * i, half_precision); } } template <int span> inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data* src_a) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r.data, src_a[i].data); } } template <int span> inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data src_a) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r.data, src_a.data); } } template <int span> inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data* src_m_r, AVX_Data* src_a) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r[i].data, src_a[i].data); } } template <int span> inline void simd_sqrt(AVX_Data* dst, AVX_Data* src) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_SQRT(src[i].data); } } template <int span> inline void simd_add(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_ADD(src_a_l[i].data, src_a_r.data); } } template <int span> inline void simd_add(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_ADD(src_a_l[i].data, src_a_r[i].data); } } template <int span> inline void simd_mul(AVX_Data* dst, 
AVX_Data* src_a_l, AVX_Data src_a_r) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_MUL(src_a_l[i].data, src_a_r.data); } } template <int span> inline void simd_mul(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_MUL(src_a_l[i].data, src_a_r[i].data); } } template <int span> inline void simd_div(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r) { #pragma omp parallel for for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_DIV(src_a_l[i].data, src_a_r[i].data); } } #endif
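/*
 * Illustrative usage sketch (hypothetical function, not part of the original
 * header): chaining the span-templated helpers into an AXPY-style tile update,
 * y += alpha * x, over span * SIMD_WIDTH floats at full precision. Guarded by
 * the same macro check the header uses.
 */
#if defined(__AVX512__) or defined(__AVX256__)
template <int span>
inline void simd_axpy_tile(float* y, float* x, float alpha)
{
    AVX_Data alpha_v;
    alpha_v.data = SIMD_SET(alpha);          // broadcast the scalar
    AVX_Data x_v[span];
    AVX_Data y_v[span];
    simd_load<span>(x_v, x, false);          // half_precision = false
    simd_load<span>(y_v, y, false);
    simd_fma<span>(y_v, x_v, alpha_v, y_v);  // y_v[i] = x_v[i] * alpha_v + y_v[i]
    simd_store<span>(y, y_v, false);
}
#endif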
GB_dense_subassign_25_template.c
//------------------------------------------------------------------------------ // GB_dense_subassign_25_template: C<M> = A where C is empty and A is dense //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C<M> = A where C starts as empty, M is structural, and A is dense. The // pattern of C is an exact copy of M. A is full, dense, or bitmap. // M is sparse or hypersparse, and C is constructed with the same pattern as M. { //-------------------------------------------------------------------------- // get C, M, and A //-------------------------------------------------------------------------- ASSERT (GB_sparsity (M) == GB_sparsity (C)) ; int64_t *restrict Ci = C->i ; ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ; ASSERT (GB_JUMBLED_OK (M)) ; const int64_t *restrict Mp = M->p ; const int64_t *restrict Mh = M->h ; const int64_t *restrict Mi = M->i ; const int64_t mvlen = M->vlen ; const bool A_is_bitmap = GB_IS_BITMAP (A) ; const bool A_iso = A->iso ; const int8_t *restrict Ab = A->b ; const int64_t avlen = A->vlen ; const int64_t *restrict kfirst_Mslice = M_ek_slicing ; const int64_t *restrict klast_Mslice = M_ek_slicing + M_ntasks ; const int64_t *restrict pstart_Mslice = M_ek_slicing + M_ntasks * 2 ; #ifdef GB_ISO_ASSIGN ASSERT (C->iso) ; #else ASSERT (!C->iso) ; const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ; GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; #endif //-------------------------------------------------------------------------- // C<M> = A //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // A is bitmap, so zombies can be created in C //---------------------------------------------------------------------- int64_t nzombies = 0 ; int tid ; #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (tid = 0 ; tid < M_ntasks ; tid++) { // if kfirst > klast then task tid does no work at all int64_t kfirst = kfirst_Mslice [tid] ; int64_t klast = klast_Mslice [tid] ; int64_t task_nzombies = 0 ; //------------------------------------------------------------------ // C<M(:,kfirst:klast)> = A(:,kfirst:klast) //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // find the part of M(:,k) to be operated on by this task //-------------------------------------------------------------- int64_t j = GBH (Mh, k) ; int64_t pM_start, pM_end ; GB_get_pA (&pM_start, &pM_end, tid, k, kfirst, klast, pstart_Mslice, Mp, mvlen) ; //-------------------------------------------------------------- // C<M(:,j)> = A(:,j) //-------------------------------------------------------------- // M is hypersparse or sparse. C is the same as M. 
// pA points to the start of A(:,j) since A is dense int64_t pA = j * avlen ; for (int64_t pM = pM_start ; pM < pM_end ; pM++) { int64_t i = Mi [pM] ; int64_t p = pA + i ; if (Ab [p]) { // C(i,j) = A(i,j) #ifndef GB_ISO_ASSIGN GB_COPY_A_TO_C (Cx, pM, Ax, p, A_iso) ; #endif } else { // C(i,j) becomes a zombie task_nzombies++ ; Ci [pM] = GB_FLIP (i) ; } } } nzombies += task_nzombies ; } C->nzombies = nzombies ; } else { //---------------------------------------------------------------------- // A is full, so no zombies will appear in C //---------------------------------------------------------------------- #ifndef GB_ISO_ASSIGN { int tid ; #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) for (tid = 0 ; tid < M_ntasks ; tid++) { // if kfirst > klast then task tid does no work at all int64_t kfirst = kfirst_Mslice [tid] ; int64_t klast = klast_Mslice [tid] ; //-------------------------------------------------------------- // C<M(:,kfirst:klast)> = A(:,kfirst:klast) //-------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //---------------------------------------------------------- // find the part of M(:,k) to be operated on by this task //---------------------------------------------------------- int64_t j = GBH (Mh, k) ; int64_t pM_start, pM_end ; GB_get_pA (&pM_start, &pM_end, tid, k, kfirst, klast, pstart_Mslice, Mp, mvlen) ; //---------------------------------------------------------- // C<M(:,j)> = A(:,j) //---------------------------------------------------------- // M is hypersparse or sparse. C is the same as M. // pA points to the start of A(:,j) since A is dense int64_t pA = j * avlen ; GB_PRAGMA_SIMD_VECTORIZE for (int64_t pM = pM_start ; pM < pM_end ; pM++) { // C(i,j) = A(i,j) int64_t p = pA + GBI (Mi, pM, mvlen) ; GB_COPY_A_TO_C (Cx, pM, Ax, p, A_iso) ; } } } } #endif } } #undef GB_ISO_ASSIGN
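/*
 * Illustrative sketch of the zombie convention used above (hypothetical
 * helpers, not the GraphBLAS API; the exact encoding is assumed): a masked
 * entry with no corresponding entry in the bitmap A stays in C's pattern but
 * is marked dead by storing a flipped, negative row index, so it can be
 * pruned later without moving any data.
 */
#include <stdint.h>
static inline int64_t flip_index (int64_t i)      { return -i - 2 ; }  /* assumed GB_FLIP encoding */
static inline int     index_is_zombie (int64_t i) { return i < 0 ; }
static inline int64_t unflip_index (int64_t i)    { return index_is_zombie (i) ? (-i - 2) : i ; }
/* round trip: unflip_index (flip_index (3)) == 3, and flip_index (0) == -2 < 0,
 * so every flipped index is negative and recoverable. */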
GB_unaryop__lnot_uint16_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint16_int32 // op(A') function: GB_tran__lnot_uint16_int32 // C type: uint16_t // A type: int32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint16_int32 ( uint16_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
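/*
 * Scalar reference for the cast-then-apply pipeline hard-coded above
 * (illustrative sketch, not part of the generated file). Note the order:
 * GB_CASTING narrows to uint16_t before GB_OP applies the logical not, so
 * e.g. aij = 65536 casts to 0 and yields 1, whereas !(aij != 0) evaluated on
 * the int32_t value would yield 0.
 */
#include <stdint.h>
static inline uint16_t lnot_uint16_int32_ref (int32_t aij)
{
    uint16_t x = (uint16_t) aij ;   /* GB_CASTING */
    return (uint16_t) !(x != 0) ;   /* GB_OP */
}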
pi-v17.c
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum=0.0, pi=0.0; #if _DEBUG_ double start,end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0/(double) num_steps; #if _DEBUG_ start= omp_get_wtime(); #else Extrae_event (PROGRAM, PI_COMPUTATION); #endif /* do computation -- using just two threads */ // WARNING : incorrect code #pragma omp parallel private(i,x) reduction(+:sum) { #if _DEBUG_ int id = omp_get_thread_num(); #endif #pragma omp for schedule(dynamic, num_steps/4) nowait for (i=0; i < num_steps/2; i++) { x = (i+0.5)*step; sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } #pragma omp for schedule(dynamic, num_steps/4) nowait for (i=num_steps/2; i < num_steps; i++) { x = (i+0.5)*step; sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } #pragma omp barrier #pragma omp single pi = step * sum; } #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end-start); #else Extrae_event (PROGRAM, END); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
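/*
 * Corrective sketch (not part of the original exercise file): the region
 * above is flagged "incorrect code" because pi = step * sum is computed
 * inside the parallel region, where sum still names each thread's private
 * reduction copy; the combined value is only guaranteed after the region
 * ends. (The schedule(dynamic, num_steps/4) chunking also splits each half
 * of the iteration space into at most two chunks, matching the "just two
 * threads" comment.) A minimal correct variant:
 */
double compute_pi(int num_steps)
{
    const double step = 1.0 / (double) num_steps;
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    return step * sum;   /* reduction result is visible once the region ends */
}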
GB_binop__gt_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__gt_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__gt_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__gt_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_fp32) // A*D function (colscale): GB (_AxD__gt_fp32) // D*A function (rowscale): GB (_DxB__gt_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__gt_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__gt_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_fp32) // C=scalar+B GB (_bind1st__gt_fp32) // C=scalar+B' GB (_bind1st_tran__gt_fp32) // C=A+scalar GB (_bind2nd__gt_fp32) // C=A'+scalar GB (_bind2nd_tran__gt_fp32) // C type: bool // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_FP32 || GxB_NO_GT_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__gt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__gt_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__gt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A 
is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__atan2_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__atan2_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__atan2_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__atan2_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__atan2_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__atan2_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__atan2_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__atan2_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__atan2_fp64) // C=scalar+B GB (_bind1st__atan2_fp64) // C=scalar+B' GB (_bind1st_tran__atan2_fp64) // C=A+scalar GB (_bind2nd__atan2_fp64) // C=A'+scalar GB (_bind2nd_tran__atan2_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 0 // BinaryOp: cij = atan2 (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = atan2 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN2 || GxB_NO_FP64 || GxB_NO_ATAN2_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__atan2_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__atan2_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__atan2_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__atan2_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = 
(*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__atan2_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__atan2_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__atan2_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__atan2_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__atan2_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = atan2 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__atan2_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = atan2 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = atan2 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__atan2_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads 
) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = atan2 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__atan2_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
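/*
 * Editor's note: a minimal, standalone sketch of the "bind1st" pattern the
 * generated kernels above implement -- applying a binary operator to a matrix
 * while binding one argument to a scalar. The names below
 * (apply_atan2_bind1st, the int8_t bitmap convention) are illustrative
 * stand-ins, not part of the GraphBLAS API.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Cx [p] = atan2 (x, Bx [p]) for every entry present in the bitmap Bb;
 * Bb == NULL means the matrix is full, i.e. every entry is present. */
static void apply_atan2_bind1st(double *Cx, double x, const double *Bx,
                                const int8_t *Bb, int64_t bnz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < bnz; p++)
    {
        if (Bb != NULL && !Bb[p]) continue;   /* entry not present */
        Cx[p] = atan2(x, Bx[p]);
    }
}

int main(void)
{
    double Bx[4] = { 1.0, 2.0, 3.0, 4.0 }, Cx[4];
    apply_atan2_bind1st(Cx, 1.0, Bx, NULL, 4, 2);
    printf("Cx[0] = %f\n", Cx[0]);   /* atan2 (1, 1) = pi/4 */
    return 0;
}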
8683.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(2) { /* E := A*B */ #pragma omp for schedule(dynamic, 8) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(dynamic, 8) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(dynamic, 8) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
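/*
 * Editor's note: the OpenMP kernel above relies on the implicit barrier at the
 * end of each "omp for" -- E and F must be complete before the G loop reads
 * them. Below is a hedged, standalone serial reference for G = (A*B)*(C*D) on
 * tiny fixed-size matrices, useful for spot-checking the parallel kernel. The
 * dimension TN and the simplified initializers are hypothetical, not taken
 * from 3mm.h.
 */
#include <stdio.h>

#define TN 4  /* tiny dimension for the reference check */

static void matmul(int n, double A[n][n], double B[n][n], double C[n][n])
{
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
        {
            C[i][j] = 0.0;
            for (int k = 0; k < n; k++)
                C[i][j] += A[i][k] * B[k][j];
        }
}

int main(void)
{
    double A[TN][TN], B[TN][TN], C[TN][TN], D[TN][TN];
    double E[TN][TN], F[TN][TN], G[TN][TN];
    for (int i = 0; i < TN; i++)
        for (int j = 0; j < TN; j++)
        {
            A[i][j] = (double) (i * j) / TN;
            B[i][j] = (double) (i * (j + 1)) / TN;
            C[i][j] = (double) (i * (j + 3)) / TN;
            D[i][j] = (double) (i * (j + 2)) / TN;
        }
    matmul(TN, A, B, E);   /* E := A*B */
    matmul(TN, C, D, F);   /* F := C*D */
    matmul(TN, E, F, G);   /* G := E*F */
    printf("G[1][1] = %f\n", G[1][1]);
    return 0;
}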
trmm_x_coo_n_hi_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT m = mat->rows; ALPHA_INT n = columns; ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < m * n; i++) alpha_mul(y[i], y[i], beta); #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); for (ALPHA_INT ai = 0; ai < mat->nnz; ++ai) { ALPHA_INT cr = mat->row_indx[ai]; if (cr % num_threads != tid) continue; ALPHA_Number *Y = &y[index2(cr, 0, ldy)]; if (mat->col_indx[ai] >= cr) { ALPHA_Number val; alpha_mul(val, alpha, mat->values[ai]); const ALPHA_Number *X = &x[index2(mat->col_indx[ai], 0, ldx)]; for (ALPHA_INT c = 0; c < n; ++c) alpha_madde(Y[c], val, X[c]); } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
dopcte-gen2.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <stdbool.h>

#include "hstcal_memory.h"
#include "hstcal.h"
#include "hstio.h"

#include "acs.h"
#include "acsinfo.h"
#include "hstcalerr.h"
#include "trlbuf.h"

#include "pcte.h"

# ifdef _OPENMP
# include <omp.h>
# endif
# include "../../../../ctegen2/ctegen2.h"
#include <assert.h>

int get_amp_array_size_acs_cte(const ACSInfo *acs, SingleGroup *amp,
                               const int ampID, char *amploc, char *ccdamp,
                               int *xsize, int *ysize, int *xbeg, int *xend,
                               int *ybeg, int *yend);

static int extractAmp(SingleGroup * amp, const SingleGroup * image, const unsigned ampID, CTEParamsFast * ctePars);
static int insertAmp(SingleGroup * amp, const SingleGroup * image, const unsigned ampID, CTEParamsFast * ctePars);
static int alignAmpData(FloatTwoDArray * amp, const unsigned ampID);
static int alignAmp(SingleGroup * amp, const unsigned ampID);

int doPCTEGen2 (ACSInfo *acs, CTEParamsFast * ctePars, SingleGroup * chipImage, const bool forwardModelOnly)
{
    /* arguments:
       acs  i: calibration switches, etc
       x   io: image to be calibrated; written to in-place
    */

    extern int status;

    if (!acs || !ctePars || !chipImage)
        return (status = ALLOCATION_PROBLEM);

    PtrRegister ptrReg;
    initPtrRegister(&ptrReg);
#ifdef _OPENMP
    if (acs->nThreads > 1)
        trlmessage("(pctecorr) Using parallel processing provided by OpenMP inside CTE routine");
#endif

    char ccdamp[strlen(AMPSTR1)+1]; /* string to hold amps on current chip */
    int numamps;              /* number of amps on chip */
    int ampID;                /* index amp A:0, B:1, etc. */
    char * amploc;            /* pointer to amp character in AMPSORDER */
    int nRows, nColumns;      /* int for C array dimension sizes */
    int amp_xsize, amp_ysize; /* int for amp array size (x/y in CCD coords) */
    int amp_xbeg, amp_xend;   /* int for beg and end of amp arrays on chip */
    int amp_ybeg, amp_yend;

    /* functions from calacs/lib */
    void parseWFCamps (char *acsamps, int chip, char *ccdamp);
    void TimeStamp (char *message, char *rootname);
    int PutKeyDbl (Hdr *hd, char *keyword, double value, char *comment);

    /* test whether we've been given ACS WFC data, since the CTE
       algorithm is currently only valid for WFC data. */
    if (acs->detector != WFC_CCD_DETECTOR)
    {
        trlerror("(pctecorr) only valid for WFC CCD data, PCTECORR should be OMIT for all others.");
        return (status = ERROR_RETURN);
    }

    if (acs->printtime)
    {
        TimeStamp("Starting CTE correction...","");
    }

    /* need to figure out which amps are on this chip */
    ccdamp[0] = '\0'; /* "reset" the string for reuse */
    parseWFCamps(acs->ccdamp, acs->chip, ccdamp);

    /* loop over amps on this chip and do CTE correction */
    numamps = strlen(ccdamp);

    //Now loop over each amp and compute cte correction
    {unsigned nthAmp;
    for (nthAmp = 0; nthAmp < numamps; ++nthAmp)
    {
        sprintf(MsgText, "(pctecorr) Performing CTE correction for amp %c", ccdamp[nthAmp]);
        trlmessage(MsgText);

        /* get the amp letter and number where A:0, B:1, etc.
*/ amploc = strchr(AMPSORDER, ccdamp[nthAmp]); ampID = *amploc - AMPSORDER[0]; ctePars->rn_amp = acs->readnoise[ampID]; sprintf(MsgText, "(pctecorr) Read noise level from CCDTAB: %f.", ctePars->rn_amp); trlmessage(MsgText); /* get amp array size */ if (get_amp_array_size_acs_cte(acs, chipImage, ampID, amploc, ccdamp, &amp_xsize, &amp_ysize, &amp_xbeg, &amp_xend, &amp_ybeg, &amp_yend)) { freeOnExit(&ptrReg); return (status); } nRows = amp_ysize; nColumns = amp_xsize; ctePars->nRows = nRows; ctePars->nColumns = nColumns; ctePars->columnOffset = amp_xbeg; ctePars->rowOffset = amp_ybeg; // razColumnOffset is used to align the image with the column scaling (SCLBYCOL) in the PCTETAB. // The SCLBYCOL array is 8192 cols wide which is 2048*4 and therefore does NOT include the physical // overscan columns (24 for ACS WFC) like this array does for calwf3. // The raz format is all 4 amps side by side in CDAB order. ctePars->razColumnOffset = ctePars->columnOffset; if (ampID == AMP_A || ampID == AMP_B) ctePars->razColumnOffset += ACS_WFC_N_COLUMNS_PER_CHIP_EXCL_OVERSCAN; //This is used for the final output SingleGroup ampImage; initSingleGroup(&ampImage); addPtr(&ptrReg, &ampImage, &freeSingleGroup); if (allocSingleGroupExts(&ampImage, nColumns, nRows, SCIEXT | ERREXT, False) != 0) { freeOnExit(&ptrReg); return (status = OUT_OF_MEMORY); } // read data from the SingleGroup into an array containing data from // just one amp if ((status = extractAmp(&ampImage, chipImage, ampID, ctePars))) { freeOnExit(&ptrReg); return (status); } if ((status = alignAmp(&ampImage, ampID))) { freeOnExit(&ptrReg); return (status); } //copy to column major storage SingleGroup columnMajorImage; initSingleGroup(&columnMajorImage); addPtr(&ptrReg, &columnMajorImage, &freeSingleGroup); if (allocSingleGroupExts(&columnMajorImage, nColumns, nRows, SCIEXT, False) != 0) { freeOnExit(&ptrReg); return (status = OUT_OF_MEMORY); } if ((status = copySingleGroup(&columnMajorImage, &ampImage, COLUMNMAJOR))) { freeOnExit(&ptrReg); return (status = ALLOCATION_PROBLEM); } if (forwardModelOnly) { // Not needed as we won't be smoothing. } else { //CALCULATE THE SMOOTH READNOISE IMAGE trlmessage("(pctecorr) Calculating smooth readnoise image..."); if (ctePars->noise_mit != 0) { trlerror("(pctecorr) Only noise model 0 implemented!"); freeOnExit(&ptrReg); return (status = ERROR_RETURN); } } SingleGroup smoothedImage; initSingleGroup(&smoothedImage); addPtr(&ptrReg, &smoothedImage, &freeSingleGroup); if (allocSingleGroupExts(&smoothedImage, nColumns, nRows, SCIEXT, False) != 0) { freeOnExit(&ptrReg); return (status = OUT_OF_MEMORY); } setStorageOrder(&smoothedImage, COLUMNMAJOR); if (forwardModelOnly) { // Don't actually smooth but copy to smoothedImage if ((status = copySingleGroup(&smoothedImage, &columnMajorImage, COLUMNMAJOR))) { freeOnExit(&ptrReg); return (status = ALLOCATION_PROBLEM); } } else { // do some smoothing on the data so we don't amplify the read noise. 
if ((status = cteSmoothImage(&columnMajorImage, &smoothedImage, ctePars, ctePars->rn_amp))) { freeOnExit(&ptrReg); return (status); } } trlmessage("(pctecorr) ...complete."); trlmessage("(pctecorr) Creating charge trap image..."); SingleGroup trapPixelMap; initSingleGroup(&trapPixelMap); addPtr(&ptrReg, &trapPixelMap, &freeSingleGroup); if (allocSingleGroupExts(&trapPixelMap, nColumns, nRows, SCIEXT, False) != 0) { freeOnExit(&ptrReg); return (status = OUT_OF_MEMORY); } setStorageOrder(&trapPixelMap, COLUMNMAJOR); if ((status = populateTrapPixelMap(&trapPixelMap, ctePars))) { freeOnExit(&ptrReg); return status; } trlmessage("(pctecorr) ...complete."); SingleGroup * cteCorrectedImage = &columnMajorImage; if (forwardModelOnly) { trlmessage("(pctecorr) Running forward model simulation..."); //perform CTE correction if ((status = forwardModel(&smoothedImage, cteCorrectedImage, &trapPixelMap, ctePars))) { freeOnExit(&ptrReg); return status; } } else { trlmessage("(pctecorr) Running correction algorithm..."); //perform CTE correction if ((status = inverseCTEBlur(&smoothedImage, cteCorrectedImage, &trapPixelMap, ctePars))) { freeOnExit(&ptrReg); return status; } } freePtr(&ptrReg, &trapPixelMap); trlmessage("(pctecorr) ...complete."); // add 10% correction to error in quadrature. float totalCounts = 0; float totalRawCounts = 0; #ifdef _OPENMP #pragma omp parallel shared(nRows, nColumns, cteCorrectedImage, smoothedImage, ampImage) #endif { double temp_err; float threadCounts = 0; float threadRawCounts = 0; float delta; {unsigned k; #ifdef _OPENMP #pragma omp for schedule(static) #endif //This loop order is row major as more row major storage is accessed than column. //MORE: look into worth splitting out ops, prob needs a order swap (copy) so perhaps not worth it. 
for (k = 0; k < nRows; ++k) { {unsigned m; for (m = 0; m < nColumns; ++m) { delta = (PixColumnMajor(cteCorrectedImage->sci.data,k,m) - PixColumnMajor(smoothedImage.sci.data,k,m)); threadCounts += delta; threadRawCounts += Pix(ampImage.sci.data, m, k); temp_err = 0.1 * fabs(delta); Pix(ampImage.sci.data, m, k) += delta; float err2 = Pix(ampImage.err.data, m, k); err2 *= err2; Pix(ampImage.err.data, m, k) = sqrt(err2 + temp_err*temp_err); }} }} #ifdef _OPENMP #pragma omp critical(deltaAggregate) #endif { totalCounts += threadCounts; totalRawCounts += threadRawCounts; } }//close parallel block sprintf(MsgText, "(pctecorr) Total count difference (corrected-raw) incurred from correction: %f (%f%%)", totalCounts, totalCounts/totalRawCounts*100); trlmessage(MsgText); // put the CTE corrected data back into the SingleGroup structure if ((status = alignAmp(&ampImage, ampID))) { freeOnExit(&ptrReg); return (status); } if ((status = insertAmp(chipImage, &ampImage, ampID, ctePars))) { freeOnExit(&ptrReg); return (status); } // free mem used by our amp arrays freePtr(&ptrReg, &ampImage); freePtr(&ptrReg, &columnMajorImage); freePtr(&ptrReg, &smoothedImage); }} if (acs->printtime) TimeStamp("CTE corrections complete",""); freeOnExit(&ptrReg); return (status); } static int extractAmp(SingleGroup * amp, const SingleGroup * image, const unsigned ampID, CTEParamsFast * ctePars) { extern int status; if (!amp || !amp->sci.data.data || !image || !image->sci.data.data) return (status = ALLOCATION_PROBLEM); //WARNING - assumes row major storage assert(amp->sci.data.storageOrder == ROWMAJOR && image->sci.data.storageOrder == ROWMAJOR); if (ampID != AMP_A && ampID != AMP_B && ampID != AMP_C && ampID != AMP_D) { trlerror("Amp number not recognized, must be 0-3."); return (status = ERROR_RETURN); } const unsigned nRows = amp->sci.data.ny; const unsigned nColumns = amp->sci.data.nx; unsigned rowSkipLength = image->sci.data.nx; unsigned offset = ctePars->columnOffset; copyOffsetSingleGroup(amp, image, nRows, nColumns, 0, offset, nColumns, rowSkipLength); return status; } static int insertAmp(SingleGroup * image, const SingleGroup * amp, const unsigned ampID, CTEParamsFast * ctePars) { extern int status; if (!amp || !amp->sci.data.data || !image || !image->sci.data.data) return (status = ALLOCATION_PROBLEM); //WARNING - assumes row major storage assert(amp->sci.data.storageOrder == ROWMAJOR && image->sci.data.storageOrder == ROWMAJOR); if (ampID != AMP_A && ampID != AMP_B && ampID != AMP_C && ampID != AMP_D) { trlerror("Amp number not recognized, must be 0-3."); return (status = ERROR_RETURN); } const unsigned nRows = amp->sci.data.ny; const unsigned nColumns = amp->sci.data.nx; unsigned rowSkipLength = image->sci.data.nx; unsigned offset = ctePars->columnOffset; copyOffsetSingleGroup(image, amp, nRows, nColumns, offset, 0, rowSkipLength, nColumns); return status; } static int alignAmp(SingleGroup * amp, const unsigned ampID) { extern int status; if (!amp) return (status = ALLOCATION_PROBLEM); //sci data if (amp->sci.data.data) { if ((status = alignAmpData(&amp->sci.data, ampID))) return status; } //err data if (amp->err.data.data) { if ((status = alignAmpData(&amp->err.data, ampID))) return status; } //dq data if (amp->dq.data.data) assert(0);//unimplemented return status; } static int alignAmpData(FloatTwoDArray * amp, const unsigned ampID) { //NOTE: There is a similar version of this in wfc3 - code changes should be reflected in both. //Align image quadrants such that the amps are at the bottom left, i.e. 
aligned with amp C. extern int status; if (!amp || !amp->data) return (status = ALLOCATION_PROBLEM); //Amp C is already correctly aligned if (ampID == AMP_C) return status; PtrRegister ptrReg; initPtrRegister(&ptrReg); //WARNING - assumes row major storage assert(amp->storageOrder == ROWMAJOR); const unsigned nColumns = amp->nx; const unsigned nRows = amp->ny; //Flip about y axis, i.e. about central column if (ampID == AMP_B || ampID == AMP_D) { //grab a row, flip it, put it back const unsigned rowLength = nColumns; {unsigned i; #ifdef _OPENMP #pragma omp parallel for shared(amp) private(i) schedule(static) #endif for (i = 0; i < nRows; ++i) { //find row float * row = amp->data + i*nColumns; //flip right to left float tempPixel; {unsigned j; for (j = 0; j < rowLength/2; ++j) { tempPixel = row[j]; row[j] = row[rowLength-1-j]; row[rowLength-1-j] = tempPixel; }} }} } //Only thing left is to flip AB chip upside down //Flip about x axis, i.e. central row if (ampID == AMP_A || ampID == AMP_B) { //either physically align all or propagate throughout a mechanism to work on the array upside down (flip b quad though) //we'll just flip all for now. See if there's info in the header specifying amp location rel to pix in file, //i.e. a way to know whether the chip is 'upside down'. Could then reverse cte corr trail walk direction Bool allocationFail = False; #ifdef _OPENMP #pragma omp parallel shared(amp, allocationFail) #endif { float * tempRow = NULL; size_t rowSize = nColumns*sizeof(*tempRow); tempRow = malloc(rowSize); if (!tempRow) { #ifdef _OPENMP #pragma omp critical(allocationCheck) // This really isn't needed #endif { allocationFail = True; } } #ifdef _OPENMP #pragma omp barrier #endif if (!allocationFail) { {unsigned i; #ifdef _OPENMP #pragma omp for schedule(static) #endif for (i = 0; i < nRows/2; ++i) { float * topRow = amp->data + i*nColumns; float * bottomRow = amp->data + (nRows-1-i)*nColumns; memcpy(tempRow, topRow, rowSize); memcpy(topRow, bottomRow, rowSize); memcpy(bottomRow, tempRow, rowSize); }} } if (tempRow) free(tempRow); }//close parallel block if (allocationFail) { sprintf(MsgText, "Out of memory for 'tempRow' in 'alignAmpData'"); trlerror(MsgText); return (status = OUT_OF_MEMORY); } } return status; }
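/*
 * Editor's note: a self-contained sketch of the row/column flips that
 * alignAmpData() above performs, on a plain row-major float array. Flipping
 * about the central column maps amp B/D data onto the amp C orientation;
 * flipping about the central row maps amp A/B data. The original parallelizes
 * both loops with OpenMP; that is omitted here for brevity, and the names are
 * illustrative only.
 */
#include <string.h>
#include <stdlib.h>

static void flip_left_right(float *img, unsigned nRows, unsigned nCols)
{
    for (unsigned i = 0; i < nRows; ++i)
    {
        float *row = img + (size_t) i * nCols;
        for (unsigned j = 0; j < nCols / 2; ++j)
        {
            float t = row[j];
            row[j] = row[nCols - 1 - j];
            row[nCols - 1 - j] = t;
        }
    }
}

static int flip_top_bottom(float *img, unsigned nRows, unsigned nCols)
{
    size_t rowSize = nCols * sizeof(float);
    float *tmp = malloc(rowSize);
    if (!tmp) return -1;   /* a caller would map this to OUT_OF_MEMORY */
    for (unsigned i = 0; i < nRows / 2; ++i)
    {
        float *top = img + (size_t) i * nCols;
        float *bot = img + (size_t) (nRows - 1 - i) * nCols;
        memcpy(tmp, top, rowSize);
        memcpy(top, bot, rowSize);
        memcpy(bot, tmp, rowSize);
    }
    free(tmp);
    return 0;
}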
GB_assign_zombie3.c
//------------------------------------------------------------------------------ // GB_assign_zombie3: delete entries in C(:,j) for C_replace_phase //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // For GrB_Row_assign or GrB_Col_assign, C(I,j)<#M,repl>=any must delete all // entries C(i,j) outside of C(I,j), if the mask M(i,0) (or its complement) is // zero. This step is not done for GxB_*_subassign, since that method does not // modify anything outside IxJ. // GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other. // C must be sparse or hypersparse. // M can have any sparsity structure: hypersparse, sparse, bitmap, or full #include "GB_assign.h" #include "GB_assign_zombie.h" #include "GB_subassign_methods.h" void GB_assign_zombie3 ( GrB_Matrix C, // the matrix C, or a copy const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const int64_t j, // vector index with entries to delete const GrB_Index *I, const int64_t nI, const int Ikind, const int64_t Icolon [3], GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_FULL (C)) ; ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (GB_ZOMBIES_OK (C)) ; ASSERT (GB_JUMBLED_OK (C)) ; ASSERT (!GB_PENDING (C)) ; ASSERT (!GB_ZOMBIES (M)) ; ASSERT (!GB_JUMBLED (M)) ; // binary search on M ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M //-------------------------------------------------------------------------- // get C (:,j) //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ch = C->h ; const int64_t *GB_RESTRICT Cp = C->p ; int64_t *GB_RESTRICT Ci = C->i ; int64_t pC_start, pC_end, pleft = 0, pright = C->nvec-1 ; GB_lookup (C->h != NULL, Ch, Cp, C->vlen, &pleft, pright, j, &pC_start, &pC_end) ; int64_t nzombies = C->nzombies ; const int64_t zjnz = pC_end - pC_start ; //-------------------------------------------------------------------------- // get M(:,0) //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Mp = M->p ; const int8_t *GB_RESTRICT Mb = M->b ; const int64_t *GB_RESTRICT Mi = M->i ; const GB_void *GB_RESTRICT Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ; const size_t msize = M->type->size ; const int64_t Mvlen = M->vlen ; int64_t pM_start = 0 ; // Mp [0] int64_t pM_end = GBP (Mp, 1, Mvlen) ; const bool M_is_bitmap = GB_IS_BITMAP (M) ; const bool mjdense = (pM_end - pM_start) == Mvlen ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (zjnz, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 
1 : (64 * nthreads) ; //-------------------------------------------------------------------------- // delete entries from C(:,j) that are outside I, if the mask M allows it //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { int64_t p1, p2 ; GB_PARTITION (p1, p2, zjnz, taskid, ntasks) ; for (int64_t pC = pC_start + p1 ; pC < pC_start + p2 ; pC++) { //------------------------------------------------------------------ // get C(i,j) //------------------------------------------------------------------ int64_t i = Ci [pC] ; if (!GB_IS_ZOMBIE (i)) { //-------------------------------------------------------------- // C(i,j) is outside C(I,j) if i is not in the list I //-------------------------------------------------------------- bool i_outside = !GB_ij_is_in_list (I, nI, i, Ikind, Icolon) ; if (i_outside) { //---------------------------------------------------------- // C(i,j) is a live entry not in the C(I,J) submatrix //---------------------------------------------------------- // Check the mask M to see if it should be deleted. GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (i) ; if (Mask_comp) { // negate the mask if Mask_comp is true mij = !mij ; } if (!mij) { // delete C(i,j) by marking it as a zombie nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } } } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; }
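/*
 * Editor's note: a standalone sketch of the "zombie" convention used above.
 * A zombie is a deleted entry whose row index is kept but flipped to a
 * negative code, so deletion costs O(1) per entry and the physical removal
 * happens later in one batched pass. The FLIP encoding below, -(i)-2, is in
 * the spirit of GB_FLIP; treat it as illustrative rather than the library's
 * definition.
 */
#include <stdio.h>
#include <stdint.h>

#define FLIP(i)      (-(i) - 2)     /* invertible: FLIP (FLIP (i)) == i */
#define IS_ZOMBIE(i) ((i) < 0)

int main(void)
{
    int64_t Ci[6] = { 0, 2, 3, 5, 7, 8 };   /* row indices of one column */
    int64_t nzombies = 0;
    int64_t k;

    /* delete all entries with an even row index, in parallel */
    #pragma omp parallel for reduction(+:nzombies) schedule(static)
    for (k = 0; k < 6; k++)
    {
        int64_t i = Ci[k];
        if (!IS_ZOMBIE(i) && i % 2 == 0)
        {
            Ci[k] = FLIP(i);   /* mark as zombie; index recoverable via FLIP */
            nzombies++;
        }
    }
    printf("zombies: %lld\n", (long long) nzombies);
    return 0;
}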
distribute.c
#include <stdlib.h>
#include <omp.h>

int main()
{
  const int N = 8;
  int a[N];
  int i;

  /* Create a league of two teams; "distribute" then splits the iteration
   * space across the teams. Only each team's initial thread executes here,
   * since there is no nested "parallel for" inside the distribute loop. */
  #pragma omp teams num_teams(2) thread_limit(N)
  #pragma omp distribute
  for (i = 0; i < N; i++)
  {
    a[i] = omp_get_team_num();
  }
  return 0;
}
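/*
 * Editor's note: in the example above each team only uses its initial thread.
 * A hedged variant that also spreads each team's chunk over a thread team,
 * via the combined "distribute parallel for" construct (names and sizes are
 * illustrative):
 */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    const int N = 8;
    int team[8], thread[8];

    #pragma omp teams num_teams(2) thread_limit(4)
    #pragma omp distribute parallel for
    for (int i = 0; i < N; i++)
    {
        team[i] = omp_get_team_num();
        thread[i] = omp_get_thread_num();
    }

    for (int i = 0; i < N; i++)
        printf("i=%d written by team %d, thread %d\n", i, team[i], thread[i]);
    return 0;
}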
FeatureImportanceTree.h
/*
 * This software is distributed under BSD 3-clause license (see LICENSE file).
 *
 * Authors: Yuhui Liu
 */
#ifndef _FEATUREIMPORTANCETREE_H__
#define _FEATUREIMPORTANCETREE_H__

#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
#include <shogun/multiclass/tree/TreeMachine.h>
#include <shogun/multiclass/tree/TreeMachineNode.h>
namespace shogun
{
	/** @brief class FeatureImportanceTree, a mixin class for trees that need
	 * to compute feature importances. This class is derived from
	 * TreeMachine<NodeType> and stores the feature importances.
	 */
	template <typename NodeType>
	class FeatureImportanceTree : public TreeMachine<NodeType>
	{
	protected:
		void compute_feature_importance(
		    int32_t num_features,
		    const std::shared_ptr<TreeMachineNode<NodeType>>& node)
		{
			m_feature_importances = SGVector<float64_t>(num_features);
			m_feature_importances.zero();
			compute_feature_importance_impl(node);
			// Scale by the total sample weight, then normalize so the
			// importances sum to one.
			float64_t total_num_sample = node->data.total_weight;
			linalg::scale(
			    m_feature_importances, m_feature_importances,
			    1.0 / total_num_sample);
			auto normalizer = linalg::sum(m_feature_importances);
			if (normalizer > 0)
				linalg::scale(
				    m_feature_importances, m_feature_importances,
				    1.0 / normalizer);
		}

		SGVector<float64_t> m_feature_importances;

	public:
		virtual ~FeatureImportanceTree() = default;

	private:
		void compute_feature_importance_impl(
		    const std::shared_ptr<TreeMachineNode<NodeType>>& node)
		{
			const auto& children = node->get_children();
			float64_t* importances = m_feature_importances.vector;
			// Sibling subtrees can split on the same feature, so the
			// concurrent read-modify-writes on the shared importance
			// vector are made atomic.
#ifndef _MSC_VER
#pragma omp atomic
#endif
			importances[node->data.attribute_id] +=
			    node->data.impurity * node->data.total_weight;
			// Note: a non-static data member may not appear in an OpenMP
			// data-sharing clause; it is shared implicitly through "this".
#ifndef _MSC_VER
#pragma omp parallel for
#endif
			for (int64_t i = 0; i < static_cast<int64_t>(children.size());
			     i++)
			{
				const auto& child = children[i];
#ifndef _MSC_VER
#pragma omp atomic
#endif
				importances[node->data.attribute_id] -=
				    child->data.impurity * child->data.total_weight;
				if (child->data.attribute_id >= 0)
				{
					compute_feature_importance_impl(child);
				}
			}
		}
	};
} // namespace shogun
#endif
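/*
 * Editor's note: the importance computed above is "mean decrease in
 * impurity": each split on feature f contributes w(node)*imp(node) minus the
 * sum of w(child)*imp(child) over its children; the totals are scaled by
 * 1/total_weight and then normalized to sum to one. A worked example in
 * plain C on a hardcoded one-split stump (all numbers invented):
 */
#include <stdio.h>

int main(void)
{
    /* root splits on feature 0: weight 100, impurity 0.5;
     * its two leaf children: (weight 60, imp 0.2) and (weight 40, imp 0.1) */
    double imp[2] = { 0.0, 0.0 };
    imp[0] += 100.0 * 0.5;               /* root contribution: 50  */
    imp[0] -= 60.0 * 0.2 + 40.0 * 0.1;   /* minus children:    16  */

    double total_weight = 100.0;
    for (int f = 0; f < 2; f++) imp[f] /= total_weight;   /* 0.34, 0 */

    double norm = imp[0] + imp[1];
    if (norm > 0)
        for (int f = 0; f < 2; f++) imp[f] /= norm;

    printf("importance: f0=%.3f f1=%.3f\n", imp[0], imp[1]); /* f0=1, f1=0 */
    return 0;
}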
pi_omp.c
#include <stdio.h>
#include "random.c"
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include <math.h>

int main (int argc, char* argv[])
{
  double pi;
  long sum = 0;

  if (argc != 3){ // Check that the right number of arguments was passed.
    fprintf(stderr,"Usage: ./pi_omp <n_threads> <n_samples>\n");
    exit(EXIT_FAILURE);
  }
  int n_threads = atoi(argv[1]); // Read the parameter that sets the number of threads.
  if (n_threads < 1){ // Reject anything below the minimum of one thread.
    fprintf(stderr,"Please specify at least one thread\n");
    exit(EXIT_FAILURE);
  }
  long sample = atol(argv[2]); // Read the number of samples.
  if (sample < 1){ // Make sure something meaningful can be computed (also avoids dividing by zero below).
    fprintf(stderr,"Please specify a positive number of samples!\n");
    exit(EXIT_FAILURE);
  }

  // Add schedule (static, dynamic, guided) here for measurements.
  // Note: pr_random_f (from random.c) must be thread-safe for this loop to be correct.
  #pragma omp parallel for num_threads(n_threads) reduction( + : sum )
  for (long i = 0; i < sample; i++) {
    double a, b, z;
    a = pr_random_f(1.0);
    b = pr_random_f(1.0);
    z = a*a + b*b;
    if ( z <= 1) sum++; // Count hits inside the quarter circle.
  }
  pi = (double) sum / sample * 4.0;
  double error = (pi - M_PI) / M_PI;
  printf("rel_err= %f, sum= %ld, loops = %ld, pi = %f\n", error, sum, sample, pi);
  return EXIT_SUCCESS;
}
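/*
 * Editor's note: the comment in the loop above asks for schedule measurements.
 * A hedged timing harness for comparing schedule kinds (requires -fopenmp);
 * the deterministic stand-in for pr_random_f avoids its thread-safety
 * question but does not estimate pi well -- it is only there to give the
 * loop body uniform work while the schedule clause is varied.
 */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    const long samples = 10 * 1000 * 1000;
    long sum = 0;
    double t0 = omp_get_wtime();

    /* swap "static" for "dynamic,1024" or "guided" and re-run to compare */
    #pragma omp parallel for reduction(+:sum) schedule(static)
    for (long i = 0; i < samples; i++)
    {
        double a = (double)(i % 1000) / 1000.0;
        double b = (double)((i * 7) % 1000) / 1000.0;
        if (a * a + b * b <= 1.0) sum++;
    }
    printf("sum=%ld in %.3f s\n", sum, omp_get_wtime() - t0);
    return 0;
}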
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. 
class RegionCodeGenTy final { intptr_t CodeGen; typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &); CodeGenTy Callback; mutable PrePostActionTy *PrePostAction; RegionCodeGenTy() = delete; RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete; template <typename Callable> static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF, PrePostActionTy &Action) { return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action); } public: template <typename Callable> RegionCodeGenTy( Callable &&CodeGen, typename std::enable_if< !std::is_same<typename std::remove_reference<Callable>::type, RegionCodeGenTy>::value>::type * = nullptr) : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)), Callback(CallbackFn<typename std::remove_reference<Callable>::type>), PrePostAction(nullptr) {} void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; } void operator()(CodeGenFunction &CGF) const; }; struct OMPTaskDataTy final { SmallVector<const Expr *, 4> PrivateVars; SmallVector<const Expr *, 4> PrivateCopies; SmallVector<const Expr *, 4> FirstprivateVars; SmallVector<const Expr *, 4> FirstprivateCopies; SmallVector<const Expr *, 4> FirstprivateInits; SmallVector<const Expr *, 4> LastprivateVars; SmallVector<const Expr *, 4> LastprivateCopies; SmallVector<const Expr *, 4> ReductionVars; SmallVector<const Expr *, 4> ReductionCopies; SmallVector<const Expr *, 4> ReductionOps; SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences; llvm::PointerIntPair<llvm::Value *, 1, bool> Final; llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule; llvm::PointerIntPair<llvm::Value *, 1, bool> Priority; llvm::Value *Reductions = nullptr; unsigned NumberOfParts = 0; bool Tied = true; bool Nogroup = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the original shared item. const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Ref(Ref), Private(Private), ReductionOp(ReductionOp) {} }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of original shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. /// \param N Number of reduction item in the common list. /// \param PrivateAddr Address of the corresponding private item. /// \param SharedLVal Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. 
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for a reduction item. /// \param N Number of the reduction item. void emitSharedLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedLVal Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. 
Only private copies are captured but we need also to
  /// store private copy in shared address.
  /// Also, stores the expression for the private loop counter and its
  /// threadprivate name.
  struct LastprivateConditionalData {
    llvm::SmallDenseMap<CanonicalDeclPtr<const Decl>, SmallString<16>>
        DeclToUniqeName;
    LValue IVLVal;
    SmallString<16> IVName;
    /// True if original lvalue for loop counter can be used in codegen (simd
    /// region or simd only mode) and no need to create threadprivate
    /// references.
    bool UseOriginalIV = false;
  };
  /// Manages list of lastprivate conditional decls for the specified directive.
  class LastprivateConditionalRAII {
    CodeGenModule &CGM;
    const bool NeedToPush;

  public:
    LastprivateConditionalRAII(CodeGenFunction &CGF,
                               const OMPExecutableDirective &S, LValue IVLVal);
    ~LastprivateConditionalRAII();
  };

protected:
  CodeGenModule &CGM;
  StringRef FirstSeparator, Separator;

  /// Constructor allowing to redefine the name separator for the variables.
  explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                           StringRef Separator);

  /// Creates offloading entry for the provided entry ID \a ID,
  /// address \a Addr, size \a Size, and flags \a Flags.
  virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
                                  uint64_t Size, int32_t Flags,
                                  llvm::GlobalValue::LinkageTypes Linkage);

  /// Helper to emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// \param CodeGen Lambda codegen specific to an accelerator device.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
                                                StringRef ParentName,
                                                llvm::Function *&OutlinedFn,
                                                llvm::Constant *&OutlinedFnID,
                                                bool IsOffloadEntry,
                                                const RegionCodeGenTy &CodeGen);

  /// Emits object of ident_t type with info for source location.
  /// \param Flags Flags for OpenMP location.
  llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
                                  unsigned Flags = 0);

  /// Returns pointer to ident_t type.
  llvm::Type *getIdentTyPointerTy();

  /// Gets thread id value for the current thread.
  llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);

  /// Get the function name of an outlined region.
  /// The name can be customized depending on the target.
  virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }

  /// Emits \p Callee function call with arguments \p Args with location \p Loc.
  void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
                llvm::FunctionCallee Callee,
                ArrayRef<llvm::Value *> Args = llvm::None) const;

  /// Emits address of the word in memory where the current thread id is
  /// stored.
  virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);

  void setLocThreadIdInsertPt(CodeGenFunction &CGF,
                              bool AtCurrentPoint = false);
  void clearLocThreadIdInsertPt(CodeGenFunction &CGF);

  /// Check if the default location must be constant.
  /// Default is false to support OMPT/OMPD.
  virtual bool isDefaultLocationConstant() const { return false; }

  /// Returns additional flags that can be stored in reserved_2 field of the
  /// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }

  /// Tries to emit declare variant function for \p OldGD from \p NewGD.
  /// \param OrigAddr LLVM IR value for \p OldGD.
  /// \param IsForDefinition true, if requested emission for the definition of
  /// \p OldGD.
  /// \returns true if it was able to emit a definition function for \p OldGD,
  /// which points to \p NewGD.
  virtual bool tryEmitDeclareVariant(const GlobalDecl &NewGD,
                                     const GlobalDecl &OldGD,
                                     llvm::GlobalValue *OrigAddr,
                                     bool IsForDefinition);

  /// Returns default flags for the barriers depending on the directive, for
  /// which this barrier is going to be emitted.
  static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);

  /// Get the LLVM type for the critical name.
  llvm::ArrayType *getKmpCriticalNameTy() const { return KmpCriticalNameTy; }

  /// Returns the corresponding lock object for the specified critical region
  /// name. If the lock object does not exist it is created, otherwise the
  /// reference to the existing copy is returned.
  /// \param CriticalName Name of the critical region.
  llvm::Value *getCriticalRegionLock(StringRef CriticalName);

private:
  /// Default const ident_t object used for initialization of all other
  /// ident_t objects.
  llvm::Constant *DefaultOpenMPPSource = nullptr;
  using FlagsTy = std::pair<unsigned, unsigned>;
  /// Map of flags and corresponding default locations.
  using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
  OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
  Address getOrCreateDefaultLocation(unsigned Flags);

  QualType IdentQTy;
  llvm::StructType *IdentTy = nullptr;

  /// Map for SourceLocation and OpenMP runtime library debug locations.
  typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
  OpenMPDebugLocMapTy OpenMPDebugLocMap;
  /// The type for a microtask which gets passed to __kmpc_fork_call().
  /// Original representation is:
  /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
  llvm::FunctionType *Kmpc_MicroTy = nullptr;
  /// Stores debug location and ThreadID for the function.
  struct DebugLocThreadIdTy {
    llvm::Value *DebugLoc;
    llvm::Value *ThreadID;
    /// Insert point for the service instructions.
    llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
  };
  /// Map of local debug location, ThreadId and functions.
  typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
      OpenMPLocThreadIDMapTy;
  OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
  /// Map of UDRs and corresponding combiner/initializer.
  typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
                         std::pair<llvm::Function *, llvm::Function *>>
      UDRMapTy;
  UDRMapTy UDRMap;
  /// Map of functions and locally defined UDRs.
  typedef llvm::DenseMap<llvm::Function *,
                         SmallVector<const OMPDeclareReductionDecl *, 4>>
      FunctionUDRMapTy;
  FunctionUDRMapTy FunctionUDRMap;
  /// Map from the user-defined mapper declaration to its corresponding
  /// functions.
  llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
  /// Map of functions and their local user-defined mappers.
  using FunctionUDMMapTy =
      llvm::DenseMap<llvm::Function *,
                     SmallVector<const OMPDeclareMapperDecl *, 4>>;
  FunctionUDMMapTy FunctionUDMMap;
  /// Type kmp_critical_name, originally defined as typedef kmp_int32
  /// kmp_critical_name[8];
  llvm::ArrayType *KmpCriticalNameTy;
  /// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// deconstructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it a function). /// }; QualType TgtOffloadEntryQTy; /// struct __tgt_device_image{ /// void *ImageStart; // Pointer to the target code start. /// void *ImageEnd; // Pointer to the target code end. /// // We also add the host entries to the device image, as it may be useful /// // for the target runtime to have access to that information. /// __tgt_offload_entry *EntriesBegin; // Begin of the table with all /// // the entries. /// __tgt_offload_entry *EntriesEnd; // End of the table with all the /// // entries (non inclusive). /// }; QualType TgtDeviceImageQTy; /// struct __tgt_bin_desc{ /// int32_t NumDevices; // Number of devices supported. /// __tgt_device_image *DeviceImages; // Arrays of device images /// // (one per device). /// __tgt_offload_entry *EntriesBegin; // Begin of the table with all the /// // entries. /// __tgt_offload_entry *EntriesEnd; // End of the table with all the /// // entries (non inclusive). /// }; QualType TgtBinaryDescriptorQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. 
OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. llvm::Constant *ID = nullptr; public: OffloadEntryInfoTargetRegion() : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {} explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags), ID(ID) { setAddress(Addr); } llvm::Constant *getID() const { return ID; } void setID(llvm::Constant *V) { assert(!ID && "ID has been set before!"); ID = V; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoTargetRegion; } }; /// Initialize target region entry. void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order); /// Register target region entry. void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags); /// Return true if a target region entry with the provided information /// exists. bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum) const; /// brief Applies action \a Action on all registered entries. typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy; void actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action); // // Device global variable entries related. // /// Kind of the global variable entry.. enum OMPTargetGlobalVarEntryKind : uint32_t { /// Mark the entry as a to declare target. 
OMPTargetGlobalVarEntryTo = 0x0, /// Mark the entry as a to declare target link. OMPTargetGlobalVarEntryLink = 0x1, }; /// Device global variable entries info. class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. The storage is to be indexed by // file ID, device ID, parent function name and line number. typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target. llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. 
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;

  /// Mapping of the original functions to their variants and original global
  /// decl.
  llvm::MapVector<CanonicalDeclPtr<const FunctionDecl>,
                  std::pair<GlobalDecl, GlobalDecl>>
      DeferredVariantFunction;

  using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
  /// Stack for list of declarations in current context marked as nontemporal.
  /// The set is the union of all current stack elements.
  llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
  /// Stack for list of addresses of declarations in current context marked as
  /// lastprivate conditional. The set is the union of all current stack
  /// elements.
  llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;

  /// Flag for keeping track of whether a requires unified_shared_memory
  /// directive is present.
  bool HasRequiresUnifiedSharedMemory = false;

  /// Flag for keeping track of whether a target region has been emitted.
  bool HasEmittedTargetRegion = false;

  /// Flag for keeping track of whether a device routine has been emitted.
  /// Device routines are specific to the device.
  bool HasEmittedDeclareTargetRegion = false;

  /// Loads all the offload entries information from the host IR
  /// metadata.
  void loadOffloadInfoMetadata();

  /// Returns __tgt_offload_entry type.
  QualType getTgtOffloadEntryQTy();

  /// Returns __tgt_device_image type.
  QualType getTgtDeviceImageQTy();

  /// Returns __tgt_bin_desc type.
  QualType getTgtBinaryDescriptorQTy();

  /// Start scanning from statement \a S and emit all target regions
  /// found along the way.
  /// \param S Starting statement.
  /// \param ParentName Name of the function declaration that is being scanned.
  void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

  /// Build type kmp_routine_entry_t (if not built yet).
  void emitKmpRoutineEntryT(QualType KmpInt32Ty);

  /// Returns pointer to kmpc_micro type.
  llvm::Type *getKmpc_MicroPointerTy();

  /// Returns specified OpenMP runtime function.
  /// \param Function OpenMP runtime function.
  /// \return Specified function.
  llvm::FunctionCallee createRuntimeFunction(unsigned Function);

  /// Returns __kmpc_for_static_init_* runtime function for the specified
  /// size \a IVSize and sign \a IVSigned.
  llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
                                                   bool IVSigned);

  /// Returns __kmpc_dispatch_init_* runtime function for the specified
  /// size \a IVSize and sign \a IVSigned.
  llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
                                                  bool IVSigned);

  /// Returns __kmpc_dispatch_next_* runtime function for the specified
  /// size \a IVSize and sign \a IVSigned.
  llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
                                                  bool IVSigned);

  /// Returns __kmpc_dispatch_fini_* runtime function for the specified
  /// size \a IVSize and sign \a IVSigned.
  llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
                                                  bool IVSigned);

  /// If the specified mangled name is not in the module, create and
  /// return threadprivate cache object. This object is a pointer's worth of
  /// storage that's reserved for use by the OpenMP runtime.
  /// \param VD Threadprivate variable.
  /// \return Cache variable for the specified threadprivate.
  llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);

  /// Gets (if a variable with the given name already exists) or creates an
  /// internal global variable with the specified Name. The created variable
  /// has CommonLinkage by default and is initialized by a null value.
  /// \param Ty Type of the global variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *BasePtr, llvm::Value *Ptr, llvm::Value *Size, llvm::Value *MapType, CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy { llvm::Value *NewTask = nullptr; llvm::Function *TaskEntry = nullptr; llvm::Value *NewTaskNewTaskTTy = nullptr; LValue TDBase; const RecordDecl *KmpTaskTQTyRD = nullptr; llvm::Value *TaskDupFn = nullptr; };
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
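/// A sketch of the intent (illustrative; the exact entry point may vary
/// across libomptarget versions):
/// \code
///   // Before the target call, make the trip count visible to the runtime:
///   __kmpc_push_target_tripcount(DeviceID, NumIterations);
/// \endcode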
void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user-defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user-defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF = nullptr);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
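// The outlined functions above are what ultimately get handed to the
// runtime; for instance (illustrative; libomp entry points):
//   __kmpc_fork_call(&loc, /*argc=*/N, .omp_outlined., &var0, ..., &varN);
//   __kmpc_fork_teams(&loc, /*argc=*/N, .omp_outlined., &var0, ..., &varN);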
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in the finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
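/// A sketch of the usual lowering (illustrative; libomp entry points):
/// \code
///   __kmpc_barrier(&loc, global_tid);              // simple barrier
///   if (__kmpc_cancel_barrier(&loc, global_tid))   // with cancel checks
///     ; // branch to the cancellation exit block
/// \endcode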
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive requires an outer dispatch loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// Struct with the values to be passed to the dispatch runtime function
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
DispatchRTInput() = default;
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non-static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
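/// A sketch of the resulting dynamic-dispatch loop (illustrative; libomp
/// entry points for a signed 32-bit induction variable):
/// \code
///   __kmpc_dispatch_init_4(&loc, tid, schedule, lb, ub, st, chunk);
///   while (__kmpc_dispatch_next_4(&loc, tid, &last, &lo, &hi, &stride)) {
///     for (i = lo; i <= hi; i += stride)
///       body(i);
///   }
/// \endcode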
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value is returned,
/// necessary to generate the static_chunked scheduled loop.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, the chunk 1 will be used.
llvm::Value *Chunk = nullptr;
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in the case of a static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values);
/// Call the appropriate runtime routine to initialize it before start
/// of the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc);
/// Returns the address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit code for the initialization of a threadprivate variable. It emits
/// a call to the runtime library which adds the initial value to the newly
/// created threadprivate variable (if it is not constant) and registers a
/// destructor for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr);
/// Emit code for the initialization of a declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit);
/// Creates an artificial threadprivate variable with name \p Name and type
/// \p VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc);
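// For instance, '#pragma omp flush' lowers to a single runtime call
// (illustrative; libomp entry point): __kmpc_flush(&loc);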
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps);
/// Emits a single reduction combiner.
void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS);
struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; };
/// Emit code for the reduction clause. The following code should be emitted
/// for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options);
/// Emit code for the initialization of the task reduction clause. The
/// following code should be emitted for the reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data);
/// Required to resolve existing problems in the runtime.
Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows reusing existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion);
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
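/// A sketch of the emitted offload-with-fallback pattern (illustrative;
/// libomptarget entry point):
/// \code
///   if (__tgt_target(device_id, host_ptr, num_args, args_base, args,
///                    arg_sizes, arg_types) != 0)
///     host_version(...); // run the host version of the region instead
/// \endcode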
virtual void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, const Expr *Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a VD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD, llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF, const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns true
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directive was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
/// Set to true if device pointer information has to be obtained.
bool RequiresDevicePointerInfo = false;
public:
/// The array of base pointers passed to the runtime library.
llvm::Value *BasePointersArray = nullptr;
/// The array of section pointers passed to the runtime library.
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
/// The array of map types passed to the runtime library.
llvm::Value *MapTypesArray = nullptr;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo) : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
/// Clear information about the data arrays.
void clearArrayInfo() { BasePointersArray = nullptr; PointersArray = nullptr; SizesArray = nullptr; MapTypesArray = nullptr; NumberOfPtrs = 0u; }
/// Return true if the current target data information has valid arrays.
bool isValid() { return BasePointersArray && PointersArray && SizesArray && MapTypesArray && NumberOfPtrs; }
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
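/// A sketch of the doacross lowering (illustrative; libomp entry points):
/// \code
///   __kmpc_doacross_wait(&loc, gtid, vec); // 'depend(sink : ...)'
///   __kmpc_doacross_post(&loc, gtid, vec); // 'depend(source)'
/// \endcode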
virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C);
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; }
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true if it was already marked, and false otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const;
/// Performs a check on the requires decl to ensure that the target
/// architecture supports unified addressing.
virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D);
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Emits the definition of the declare variant function.
virtual bool emitDeclareVariant(GlobalDecl GD, bool IsForDefinition);
/// Checks if the \p VD variable is marked as a nontemporal declaration in
/// the current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Initializes global counter for lastprivate conditional.
virtual void initLastprivateConditionalCounter(CodeGenFunction &CGF, const OMPExecutableDirective &S);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS);
/// Emits the final update of the original variable from the global copy used
/// for the lastprivate conditional update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD, SourceLocation Loc);
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D.
This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function * emitParallelOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non-static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in the case of a static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to initialize it before start
/// of the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override;
/// Returns the address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override;
/// Emit code for the initialization of a threadprivate variable. It emits
/// a call to the runtime library which adds the initial value to the newly
/// created threadprivate variable (if it is not constant) and registers a
/// destructor for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override;
/// Creates an artificial threadprivate variable with name \p Name and type
/// \p VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override;
/// Emit code for the reduction clause. The following code should be emitted
/// for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override;
/// Emit code for the initialization of the task reduction clause. The
/// following code should be emitted for the reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override;
/// Required to resolve existing problems in the runtime.
  /// variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions + emits threadprivate variable to
  /// store the pointer to the original reduction item for the custom
  /// initializer defined by declare reduction construct.
  /// \param RCG Allows reusing existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                               ReductionCodeGen &RCG, unsigned N) override;

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                               llvm::Value *ReductionsPtr,
                               LValue SharedLVal) override;

  /// Emit code for 'taskwait' directive.
  void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must be
  /// emitted.
  ///
  void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
                                 OpenMPDirectiveKind CancelRegion) override;

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
  ///
  void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                      const Expr *IfCond,
                      OpenMPDirectiveKind CancelRegion) override;

  /// Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                  StringRef ParentName,
                                  llvm::Function *&OutlinedFn,
                                  llvm::Constant *&OutlinedFnID,
                                  bool IsOffloadEntry,
                                  const RegionCodeGenTy &CodeGen) override;

  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts to offload the execution to the device; in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the target
  /// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  void emitTargetCall(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
      const Expr *IfCond, const Expr *Device,
      llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D)>
          SizeEmitter) override;

  /// Emit the target regions enclosed in \a GD function definition or
  /// the function itself in case it is a valid device function. Returns true if
  /// \a GD was dealt with successfully.
  /// \param GD Function to scan.
  bool emitTargetFunctions(GlobalDecl GD) override;

  /// Emit the global variable if it is a valid device global variable.
  /// Returns true if \a GD was dealt with successfully.
  /// \param GD Variable declaration to emit.
  bool emitTargetGlobalVariable(GlobalDecl GD) override;

  /// Emit the global \a GD if it is meaningful for the target. Returns true
  /// if it was emitted successfully.
  /// \param GD Global to scan.
  bool emitTargetGlobal(GlobalDecl GD) override;

  /// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run by team masters. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
  ///
  void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                     SourceLocation Loc, llvm::Function *OutlinedFn,
                     ArrayRef<llvm::Value *> CapturedVars) override;

  /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
  /// for num_teams clause.
  /// \param NumTeams An integer expression for the number of teams.
  /// \param ThreadLimit An integer expression for the thread limit.
  void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
                          const Expr *ThreadLimit, SourceLocation Loc) override;

  /// Emit the target data mapping code associated with \a D.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  /// \param Info A record used to store information that needs to be preserved
  /// until the region is closed.
  void emitTargetDataCalls(CodeGenFunction &CGF,
                           const OMPExecutableDirective &D, const Expr *IfCond,
                           const Expr *Device, const RegionCodeGenTy &CodeGen,
                           TargetDataInfo &Info) override;

  /// Emit the data mapping/movement code associated with the directive
  /// \a D that should be of the form 'target [{enter|exit} data | update]'.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the target
  /// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
                                    const OMPExecutableDirective &D,
                                    const Expr *IfCond,
                                    const Expr *Device) override;

  /// Emit initialization for doacross loop nesting support.
  /// \param D Loop-based construct used in doacross nesting construct.
  void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
                        ArrayRef<Expr *> NumIterations) override;

  /// Emit code for doacross ordered directive with 'depend' clause.
  /// \param C 'depend' clause with 'sink|source' dependency kind.
  void emitDoacrossOrdered(CodeGenFunction &CGF,
                           const OMPDependClause *C) override;

  /// Translates the native parameter of outlined function if this is required
  /// for target.
  /// \param FD Field decl from captured record for the parameter.
  /// \param NativeParam Parameter itself.
  const VarDecl *translateParameter(const FieldDecl *FD,
                                    const VarDecl *NativeParam) const override;

  /// Gets the address of the native argument based on the address of the
  /// target-specific parameter.
  /// \param NativeParam Parameter itself.
  /// \param TargetParam Corresponding target-specific parameter.
  Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
                              const VarDecl *TargetParam) const override;

  /// Gets the OpenMP-specific address of the local variable.
  Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                    const VarDecl *VD) override {
    return Address::invalid();
  }
};

} // namespace CodeGen
} // namespace clang

#endif
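The task-emission protocol documented above for emitTaskCall (allocate with __kmpc_omp_task_alloc, fill the shareds field, enqueue with __kmpc_omp_task) can be written out as ordinary host code. The sketch below is illustrative only: the runtime entry points and their signatures are taken from the doc comments, while the kmp_task_t layout shown, the flags value, and the TaskFunction/captured-context names are simplifying assumptions, not the real libomp definitions.

#include <cstddef>
#include <cstdint>
#include <cstring>

using std::size_t;

// Signatures mirror the doc comments above; the struct layout is a
// simplification of libomp's real kmp_task_t.
extern "C" {
typedef int32_t kmp_int32;
struct ident_t;
struct kmp_task_t {
  void *shareds;     // points at the copied captured variables
  kmp_int32 part_id; // (the real runtime stores more fields here)
};
typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, kmp_task_t *);
kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags,
                                  size_t sizeof_kmp_task_t,
                                  size_t sizeof_shareds,
                                  kmp_routine_entry_t task_entry);
kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task);
}

// Placeholder for the outlined task body (TaskFunction in the doc comments).
extern void TaskFunction(kmp_int32 gtid, kmp_int32 part_id, void *shareds);

// Step 1: the .omp_task_entry. thunk described in the doc comments.
static kmp_int32 omp_task_entry(kmp_int32 gtid, kmp_task_t *tt) {
  TaskFunction(gtid, tt->part_id, tt->shareds);
  return 0;
}

// Steps 1-4 of emitTaskCall as the equivalent runtime call sequence.
void emitTaskCallEquivalent(ident_t *loc, kmp_int32 gtid,
                            const void *captured_context,
                            size_t sizeof_shareds) {
  // 1. Allocate the task descriptor (flags = 1 marking a tied task is an
  //    assumption for this sketch).
  kmp_task_t *new_task = __kmpc_omp_task_alloc(
      loc, gtid, /*flags=*/1, sizeof(kmp_task_t), sizeof_shareds,
      omp_task_entry);
  // 2. Copy the shared variables into the shareds field.
  std::memcpy(new_task->shareds, captured_context, sizeof_shareds);
  // 3. (Copying the destructions thunk is omitted in this sketch.)
  // 4. Enqueue the task with the runtime.
  __kmpc_omp_task(loc, gtid, new_task);
}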
nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi, Alessandro Franci
//
//

#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI)
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI

/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif

/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif

/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"

namespace Kratos
{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
 * @ingroup KratosCore
 * @brief Current class provides an implementation for standard builder and solving operations.
 * @details The RHS is constituted by the unbalanced loads (residual).
 * Degrees of freedom are reordered putting the restrained degrees of freedom at
 * the end of the system ordered in reverse order with respect to the DofSet.
 * Imposition of the Dirichlet conditions is naturally dealt with as the residual already contains
 * this information.
 * Calculation of the reactions involves a cost very similar to the calculation of the total residual.
 * @author Riccardo Rossi
 */
template <class TSparseSpace,
          class TDenseSpace,  //= DenseSpace<double>,
          class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
          >
class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
    : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
    ///@name Type Definitions
    ///@{
    KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI);

    typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    typedef Node<3> NodeType;

    typedef typename BaseType::NodesArrayType NodesArrayType;
    typedef typename BaseType::ElementsArrayType ElementsArrayType;
    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    typedef typename BaseType::ElementsContainerType ElementsContainerType;

    typedef Vector VectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor.
*/ NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI") << "Using the standard builder and solver " << std::endl; } /** Destructor. */ ~NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void BuildAll( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //contributions to the continuity equation system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Element::EquationIdVectorType EquationId; // LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0); // LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0); // Element::EquationIdVectorType solidEquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; double deltaPressure = 0; /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { // if(itNode->Is(SOLID)){ // NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); // const unsigned int neighSize = neighb_nodes.size() +1 ; // if(neighSize>1) // { // const double nodalVolume=itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); // if (solidLHS_Contribution.size1() != 1) // solidLHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!! // if (solidRHS_Contribution.size() != 1) // solidRHS_Contribution.resize(1, false); //false says not to preserve existing storage!! // solidLHS_Contribution= ZeroMatrix(1,1); // solidRHS_Contribution= ZeroVector(1); // if (solidEquationId.size() != 1) // solidEquationId.resize(1, false); // // if (solidLHS_Contribution.size1() != neighSize) // // solidLHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!! // // if (solidRHS_Contribution.size() != neighSize) // // solidRHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!! 
// // solidLHS_Contribution= ZeroMatrix(neighSize,neighSize); // // solidRHS_Contribution= ZeroVector(neighSize); // // if (solidEquationId.size() != neighSize) // // solidEquationId.resize(neighSize, false); // double deviatoricCoeff=itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); // double volumetricCoeff=itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT)+2.0*deviatoricCoeff/3.0; // deltaPressure=itNode->FastGetSolutionStepValue(PRESSURE,0)-itNode->FastGetSolutionStepValue(PRESSURE,1); // solidLHS_Contribution(0,0)+= nodalVolume/volumetricCoeff; // solidRHS_Contribution[0] += -deltaPressure*nodalVolume/volumetricCoeff; // solidRHS_Contribution[0] += itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)*nodalVolume; // const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE); // solidEquationId[0]=itNode->GetDof(PRESSURE,xDofPos).EquationId(); // // Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); // // // const unsigned int neighSize = neighb_nodes.size()+1; // // const unsigned int neighSize = nodalSFDneighboursId.size(); // // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // // for (unsigned int i = 0; i< neighSize; i++) // // { // // unsigned int indexNode=i+1; // // if(indexNode<neighSize){ // // unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode]; // // for (unsigned int k = 0; k< neighb_nodes.size(); k++) // // { // // unsigned int neigh_nodes_id=neighb_nodes[k].Id(); // // if(neigh_nodes_id==other_neigh_nodes_id){ // // solidEquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId(); // // break; // // } // // } // // } // // } // // }else{ // // for (unsigned int k = 0; k< neighb_nodes.size(); k++) // // { // // solidEquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId(); // // } // // } // #ifdef _OPENMP // Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array); // #else // Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId); // #endif // } // } //if((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); const unsigned int neighSize = neighb_nodes.size() + 1; if (neighSize > 1) { // if (LHS_Contribution.size1() != neighSize) // LHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!! // if (RHS_Contribution.size() != neighSize) // RHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!! // LHS_Contribution= ZeroMatrix(neighSize,neighSize); // RHS_Contribution= ZeroVector(neighSize); // if (EquationId.size() != neighSize) // EquationId.resize(neighSize, false); if (LHS_Contribution.size1() != 1) LHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!! if (RHS_Contribution.size() != 1) RHS_Contribution.resize(1, false); //false says not to preserve existing storage!! 
LHS_Contribution = ZeroMatrix(1, 1); RHS_Contribution = ZeroVector(1); if (EquationId.size() != 1) EquationId.resize(1, false); if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); if (nodalVolume > 0) { // in interface nodes not in contact with fluid elements the nodal volume is zero double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } if (deviatoricCoeff > 0.1 && itNode->IsNot(SOLID)) { deviatoricCoeff = 0.1; } double volumetricCoeff = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) + 2.0 * deviatoricCoeff / 3.0; if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); } deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1); LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff; RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff; RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume; } } if (itNode->Is(SOLID)) { double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS); double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO); double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5; double volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0; deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1); LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff; RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff; RHS_Contribution[0] += itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume; } const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE); EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId(); // Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); // // const unsigned int neighSize = neighb_nodes.size()+1; // const unsigned int neighSize = nodalSFDneighboursId.size(); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // for (unsigned int i = 0; i< neighSize; i++) // { // unsigned int indexNode=i+1; // if(indexNode<neighSize){ // unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode]; // for (unsigned int k = 0; k< neighb_nodes.size(); k++) // { // unsigned int neigh_nodes_id=neighb_nodes[k].Id(); // if(neigh_nodes_id==other_neigh_nodes_id){ // EquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId(); // break; // } // } // } // } // }else{ // for (unsigned int 
k = 0; k< neighb_nodes.size(); k++) // { // EquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId(); // } // } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } //} } // } ElementsArrayType &pElements = rModelPart.Elements(); int number_of_threads = OpenMPUtils::GetNumThreads(); #ifdef _OPENMP int A_size = A.size1(); //creating an array of lock variables of the size of the system matrix std::vector<omp_lock_t> lock_array(A.size1()); for (int i = 0; i < A_size; i++) omp_init_lock(&lock_array[i]); #endif DenseVector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); if (this->GetEchoLevel() > 0) { KRATOS_WATCH(number_of_threads); KRATOS_WATCH(element_partition); } #pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1) for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemMatrixType elementalLHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType elementalRHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType elementalEquationId; const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1]; unsigned int pos = (rModelPart.Nodes().begin())->GetDofPosition(PRESSURE); // assemble all elements for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //if((*it)->Is(FLUID)){ if ((*it)->IsNot(SOLID)) { //calculate elemental contribution (*it)->CalculateLocalSystem(elementalLHS_Contribution, elementalRHS_Contribution, CurrentProcessInfo); Geometry<Node<3>> &geom = (*it)->GetGeometry(); if (elementalEquationId.size() != geom.size()) elementalEquationId.resize(geom.size(), false); for (unsigned int i = 0; i < geom.size(); i++) elementalEquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId(); //assemble the elemental contribution #ifdef _OPENMP this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId, lock_array); #else this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId); #endif } } } #ifdef _OPENMP for (int i = 0; i < A_size; i++) omp_destroy_lock(&lock_array[i]); #endif KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void SystemSolve( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else TSparseSpace::SetToZero(Dx); // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector * @param rModelPart The model part of the problem to solve */ void 
SystemSolveWithPhysics( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b, ModelPart &rModelPart) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //provide physical data as needed if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded()) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart); //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else { TSparseSpace::SetToZero(Dx); KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl; } // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time. * @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY Timer::Start("Build"); /* boost::timer c_build_time; */ BuildAll(pScheme, rModelPart, A, b); Timer::Stop("Build"); // ApplyPointLoads(pScheme,rModelPart,b); // Does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b); KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; /* const double start_solve = OpenMPUtils::GetCurrentTime(); */ Timer::Start("Solve"); /* boost::timer c_solve_time; */ SystemSolveWithPhysics(A, Dx, b, rModelPart); /* std::cout << "CONTINUITY EQ: solve_time : " << c_solve_time.elapsed() << std::endl; */ Timer::Stop("Solve"); /* const double stop_solve = OpenMPUtils::GetCurrentTime(); */ KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; KRATOS_CATCH("") } void Build( typename TSchemeType::Pointer pScheme, ModelPart &r_model_part, TSystemMatrixType &A, TSystemVectorType &b) override { KRATOS_TRY if (!pScheme) KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model ElementsArrayType &pElements = r_model_part.Elements(); // //getting the array of the conditions // ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); //create a partition of the element array int number_of_threads = OpenMPUtils::GetNumThreads(); #ifdef _OPENMP int A_size = A.size1(); //creating an array of lock variables of the size of the system matrix std::vector<omp_lock_t> lock_array(A.size1()); for (int i = 0; i < A_size; i++) omp_init_lock(&lock_array[i]); #endif DenseVector<unsigned int> 
element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); if (this->GetEchoLevel() > 0) { KRATOS_WATCH(number_of_threads); KRATOS_WATCH(element_partition); } double start_prod = OpenMPUtils::GetCurrentTime(); #pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1) for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; const ProcessInfo &CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1]; unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(PRESSURE); // assemble all elements for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //calculate elemental contribution (*it)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); Geometry<Node<3>> &geom = (*it)->GetGeometry(); if (EquationId.size() != geom.size()) EquationId.resize(geom.size(), false); for (unsigned int i = 0; i < geom.size(); i++) EquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId(); //assemble the elemental contribution #ifdef _OPENMP this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array); #else this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } if (this->GetEchoLevel() > 0) { double stop_prod = OpenMPUtils::GetCurrentTime(); std::cout << "parallel building time: " << stop_prod - start_prod << std::endl; } #ifdef _OPENMP for (int i = 0; i < A_size; i++) omp_destroy_lock(&lock_array[i]); #endif KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. 
     * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
     * way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart) override
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;

        //Gets the array of elements from the modeler
        ElementsArrayType &pElements = rModelPart.Elements();
        const int nelements = static_cast<int>(pElements.size());

        Element::DofsVectorType ElementalDofList;

        ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

        unsigned int nthreads = OpenMPUtils::GetNumThreads();

        // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
        // typedef std::unordered_set < NodeType::DofType::Pointer,
        //  DofPointerHasher,
        //  DofPointerComparor,
        //  allocator_type > set_type;

#ifdef USE_GOOGLE_HASH
        typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
        typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif

        // Per-thread dof sets; this declaration must stay live since
        // dofs_aux_list is used throughout the rest of this function.
        std::vector<set_type> dofs_aux_list(nthreads);
        // std::vector<allocator_type> allocators(nthreads);

        for (int i = 0; i < static_cast<int>(nthreads); i++)
        {
#ifdef USE_GOOGLE_HASH
            dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
            // dofs_aux_list[i] = set_type( allocators[i]);
            dofs_aux_list[i].reserve(nelements);
#endif
        }

        //#pragma omp parallel for firstprivate(nelements, ElementalDofList)
        for (int i = 0; i < static_cast<int>(nelements); ++i)
        {
            auto it_elem = pElements.begin() + i;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();

            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo);
            dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        }

        // ConditionsArrayType& pConditions = rModelPart.Conditions();
        // const int nconditions = static_cast<int>(pConditions.size());
        // #pragma omp parallel for firstprivate(nconditions, ElementalDofList)
        // for (int i = 0; i < nconditions; i++)
        // {
        //  typename ConditionsArrayType::iterator it = pConditions.begin() + i;
        //  const unsigned int this_thread_id = OpenMPUtils::ThisThread();
        //  // gets list of Dof involved on every element
        //  pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
        //  dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        // }

        //here we do a reduction in a tree so as to have everything on thread 0
        unsigned int old_max = nthreads;
        unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
        while (new_max >= 1 && new_max != old_max)
        {
            // //just for debugging
            // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
            // for (int i = 0; i < new_max; i++)
            // {
            //  if (i + new_max < old_max)
            //  {
            //    std::cout << i << " - " << i + new_max << std::endl;
            //  }
            // }
            // std::cout << "********************" << std::endl;

#pragma omp parallel for
            for (int i = 0; i < static_cast<int>(new_max); i++)
            {
                if (i + new_max < old_max)
                {
                    dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
                    dofs_aux_list[i + new_max].clear();
                }
            }

            old_max = new_max;
            new_max = ceil(0.5 * static_cast<double>(old_max));
        }

        DofsArrayType Doftemp;
        BaseType::mDofSet = DofsArrayType();
        Doftemp.reserve(dofs_aux_list[0].size());
        for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
        {
            Doftemp.push_back(*it);
        }
        Doftemp.Sort();

        BaseType::mDofSet = Doftemp;

        // Throws an exception if there are no Degrees of freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;

        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;

#ifdef _OPENMP
        if (mlock_array.size() != 0)
        {
            for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
                omp_destroy_lock(&mlock_array[i]);
        }

        mlock_array.resize(BaseType::mDofSet.size());

        for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
            omp_init_lock(&mlock_array[i]);
#endif

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if (BaseType::GetCalculateReactionsFlag())
        {
            for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                                                                 << "Node : " << dof_iterator->Id() << std::endl
                                                                 << "Dof : " << (*dof_iterator) << std::endl
                                                                 << "Not possible to calculate reactions." << std::endl;
            }
        }
#endif

        KRATOS_CATCH("");
    }

    /**
     * @brief Organises the dofset in order to speed up the building phase
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpSystem(
        ModelPart &rModelPart) override
    {
        // Set equation id for degrees of freedom
        // the free degrees of freedom are positioned at the beginning of the system,
        // while the fixed ones are at the end (in opposite order).
        //
        // that means that if the EquationId is greater than "mEquationSystemSize"
        // the pointed degree of freedom is restrained
        //
        int free_id = 0;
        int fix_id = BaseType::mDofSet.size();

        for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            if (dof_iterator->IsFixed())
                dof_iterator->SetEquationId(--fix_id);
            else
                dof_iterator->SetEquationId(free_id++);

        BaseType::mEquationSystemSize = fix_id;
    }

    //**************************************************************************
    //**************************************************************************

    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType &pA,
        TSystemVectorPointerType &pDx,
        TSystemVectorPointerType &pb,
        ModelPart &rModelPart) override
    {
        KRATOS_TRY

        /* boost::timer c_contruct_matrix; */

        if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
        {
            TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
            pA.swap(pNewA);
        }
        if (pDx == NULL) //if the pointer is not initialized initialize it to an empty vector
        {
            TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
            pDx.swap(pNewDx);
        }
        if (pb == NULL) //if the pointer is not initialized initialize it to an empty vector
        {
            TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
            pb.swap(pNewb);
        }
        if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty vector
        {
            TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
            BaseType::mpReactionsVector.swap(pNewReactionsVector);
        }

        TSystemMatrixType &A = *pA;
        TSystemVectorType &Dx = *pDx;
        TSystemVectorType &b = *pb;

        //resizing the system vectors and matrix
        if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
        {
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
            ConstructMatrixStructure(pScheme, A, rModelPart);
        }
        else
        {
            if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
            {
                KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
                KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permitted." << std::endl;
                A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
                ConstructMatrixStructure(pScheme, A, rModelPart);
            }
        }
        if (Dx.size() != BaseType::mEquationSystemSize)
            Dx.resize(BaseType::mEquationSystemSize, false);
        if (b.size() != BaseType::mEquationSystemSize)
            b.resize(BaseType::mEquationSystemSize, false);

        //if needed resize the vector for the calculation of reactions
        if (BaseType::mCalculateReactionsFlag == true)
        {
            unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
            if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
                BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
        }
        /* std::cout << "CONTINUITY EQ: contruct_matrix : " << c_contruct_matrix.elapsed() << std::endl; */

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************

    /**
     * @brief Applies the Dirichlet conditions. This operation may be very heavy or completely
     * inexpensive depending on the implementation chosen and on how the System Matrix is built.
     * @details For explanation of how it works for a particular implementation the user
     * should refer to the particular Builder And Solver chosen
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
    }

    /**
     * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
     */
    void Clear() override
    {
        this->mDofSet = DofsArrayType();

        if (this->mpReactionsVector != NULL)
            TSparseSpace::Clear((this->mpReactionsVector));
        // this->mReactionsVector = TSystemVectorType();

        this->mpLinearSystemSolver->Clear();

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided. Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @param rModelPart The model part of the problem to solve
     * @return 0 all ok
     */
    int Check(ModelPart &rModelPart) override
    {
        KRATOS_TRY

        return 0;
        KRATOS_CATCH("");
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    void Assemble(
        TSystemMatrixType &A,
        TSystemVectorType &b,
        const LocalSystemMatrixType &LHS_Contribution,
        const LocalSystemVectorType &RHS_Contribution,
        const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
        ,
        std::vector<omp_lock_t> &lock_array
#endif
    )
    {
        unsigned int local_size = LHS_Contribution.size1();

        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize)
            {
#ifdef _OPENMP
                omp_set_lock(&lock_array[i_global]);
#endif
                b[i_global] += RHS_Contribution(i_local);
                for (unsigned int j_local = 0; j_local < local_size; j_local++)
                {
                    unsigned int j_global = EquationId[j_local];
                    if (j_global < BaseType::mEquationSystemSize)
                    {
                        A(i_global, j_global) += LHS_Contribution(i_local, j_local);
                    }
                }
#ifdef _OPENMP
                omp_unset_lock(&lock_array[i_global]);
#endif
            }
            //note that assembly on fixed rows is not performed here
        }
    }

    //**************************************************************************

    virtual void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType &A,
        ModelPart &rModelPart)
    {
        //filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");

        const std::size_t equation_size = BaseType::mEquationSystemSize;

        std::vector<std::unordered_set<std::size_t>> indices(equation_size);

#pragma omp parallel for firstprivate(equation_size)
        for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
        {
            indices[iii].reserve(40);
        }

        Element::EquationIdVectorType ids(3, 0);

#pragma omp parallel firstprivate(ids)
        {
            // The process info
            ProcessInfo &r_current_process_info = rModelPart.GetProcessInfo();

            // We repeat the same declaration for each thread
            std::vector<std::unordered_set<std::size_t>> temp_indexes(equation_size);

#pragma omp for
            for (int index = 0; index < static_cast<int>(equation_size); ++index)
temp_indexes[index].reserve(30); // Getting the size of the array of elements from the model const int number_of_elements = static_cast<int>(rModelPart.Elements().size()); // Element initial iterator const auto el_begin = rModelPart.ElementsBegin(); // We iterate over the elements #pragma omp for schedule(guided, 512) nowait for (int i_elem = 0; i_elem < number_of_elements; ++i_elem) { auto it_elem = el_begin + i_elem; pScheme->EquationId(*it_elem, ids, r_current_process_info); for (auto &id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto &row_indices = temp_indexes[id_i]; for (auto &id_j : ids) if (id_j < BaseType::mEquationSystemSize) row_indices.insert(id_j); } } } // Getting the size of the array of the conditions const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size()); // Condition initial iterator const auto cond_begin = rModelPart.ConditionsBegin(); // We iterate over the conditions #pragma omp for schedule(guided, 512) nowait for (int i_cond = 0; i_cond < number_of_conditions; ++i_cond) { auto it_cond = cond_begin + i_cond; pScheme->EquationId(*it_cond, ids, r_current_process_info); for (auto &id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto &row_indices = temp_indexes[id_i]; for (auto &id_j : ids) if (id_j < BaseType::mEquationSystemSize) row_indices.insert(id_j); } } } // Merging all the temporal indexes #pragma omp critical { for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) { indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end()); } } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < indices.size(); i++) nnz += indices[i].size(); A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double *Avalues = A.value_data().begin(); std::size_t *Arow_indices = A.index1_data().begin(); std::size_t *Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! 
Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i + 1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } void AssembleLHS( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ #ifdef _OPENMP std::vector<omp_lock_t> mlock_array; #endif ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int> &partitions) { partitions.resize(number_of_threads + 1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for (unsigned int i = 1; i < number_of_threads; i++) partitions[i] = partitions[i - 1] + partition_size; } void AssembleRHS( TSystemVectorType &b, const LocalSystemVectorType &RHS_Contribution, const Element::EquationIdVectorType &EquationId) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag == false) { for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } else { TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } else //fixed dof { double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } } //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, 
        Element::EquationIdVectorType &EquationId)
    {
        unsigned int local_size = LHS_Contribution.size1();

        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize)
            {
                for (unsigned int j_local = 0; j_local < local_size; j_local++)
                {
                    int j_global = EquationId[j_local];
                    A(i_global, j_global) += LHS_Contribution(i_local, j_local);
                }
            }
        }
    }

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; /* Class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI */

///@}

///@name Type Definitions
///@{

///@}

} /* namespace Kratos.*/

#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI defined */
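As a side note on the elimination scheme above: the numbering performed in SetUpSystem is what lets Assemble and AssembleRHS use `id < mEquationSystemSize` as the free-dof test. A minimal, self-contained C++ sketch of that numbering, with a stand-in Dof struct rather than the Kratos Dof class:

// Illustrative sketch (not part of the Kratos header above). Free dofs are
// numbered from 0 upward, fixed dofs from the end downward, so every id
// below the returned system size belongs to the solved system and assembly
// can simply skip ids at or beyond that bound.
#include <cstddef>
#include <vector>

struct Dof {
    bool is_fixed = false;
    std::size_t equation_id = 0;
};

// Returns the resulting system size (the number of free dofs).
std::size_t SetUpSystemSketch(std::vector<Dof> &dofs)
{
    std::size_t free_id = 0;
    std::size_t fix_id = dofs.size();
    for (auto &dof : dofs) {
        if (dof.is_fixed)
            dof.equation_id = --fix_id;  // fixed dofs: end of system, reversed
        else
            dof.equation_id = free_id++; // free dofs: compact range [0, n_free)
    }
    return fix_id; // == number of free dofs == mEquationSystemSize
}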
GB_binop__max_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__max_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__max_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__max_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint8) // A*D function (colscale): GB (_AxD__max_uint8) // D*A function (rowscale): GB (_DxB__max_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__max_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__max_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint8) // C=scalar+B GB (_bind1st__max_uint8) // C=scalar+B' GB (_bind1st_tran__max_uint8) // C=A+scalar GB (_bind2nd__max_uint8) // C=A'+scalar GB (_bind2nd_tran__max_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 0 // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
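The _bind1st and _bind2nd kernels above are the simplest members of this generated family: each streams over one value array, skips entries the bitmap marks absent, and applies MAX with a scalar bound to the other argument. The standalone sketch below spells out that loop; GB_IMAX and the bitmap test are written out locally on the assumption that they match the usual SuiteSparse macro behavior, so this is an illustration, not part of the generated kernel.

/* Standalone sketch of what _bind1st__max_uint8 computes for a bitmap B.
   GB_IMAX is assumed to be the usual integer-max macro. */
#include <stdint.h>
#include <stdio.h>

#define GB_IMAX(x,y) (((x) > (y)) ? (x) : (y))   /* assumed definition */

int main (void)
{
    uint8_t x = 7 ;                      /* scalar bound as the 1st input */
    uint8_t Bx [4] = { 3, 9, 5, 200 } ;  /* values of B */
    int8_t  Bb [4] = { 1, 1, 0, 1 } ;    /* bitmap: entry 2 is absent */
    uint8_t Cx [4] ;
    for (int64_t p = 0 ; p < 4 ; p++)
    {
        if (!Bb [p]) continue ;          /* plays the role of GBB (Bb, p) */
        Cx [p] = GB_IMAX (x, Bx [p]) ;   /* cij = max (x, bij) */
    }
    printf ("%d %d %d\n", Cx [0], Cx [1], Cx [3]) ;   /* prints: 7 9 200 */
    return (0) ;
}

_bind2nd is the mirror image, computing max (aij, y), and the *_tran variants route the same scalar binding through GB_unop_transpose.c via the GB_CAST_OP macro.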
verlet_accelerate2.c
#include "verlet.h" #include "cloud.h" void verlet_step_accelerate (int Np, double dt, const double *restrict X[3], double *restrict U[3]) { #pragma omp parallel for for (int i = 0; i < Np; i++) { for (int j = i + 1; j < Np; j++) { double du[3]; force (dt, X[0][i], X[1][i], X[2][i], X[0][j], X[1][j], X[2][j], du); for (int d = 0; d < 3; d++) { U[d][i] += du[d]; U[d][j] -= du[d]; } } } }
core.c
/* Generated by Cython 0.29.24 */ /* BEGIN: Cython Metadata { "distutils": { "name": "monotonic_align.core", "sources": [ "core.pyx" ] }, "module_name": "monotonic_align.core" } END: Cython Metadata */ #ifndef PY_SSIZE_T_CLEAN #define PY_SSIZE_T_CLEAN #endif /* PY_SSIZE_T_CLEAN */ #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_24" #define CYTHON_HEX_VERSION 0x001D18F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef 
CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef 
CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) 
#else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #if defined(PyUnicode_IS_READY) #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #else #define __Pyx_PyUnicode_READY(op) (0) #endif #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #endif #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define 
PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__monotonic_align__core #define __PYX_HAVE_API__monotonic_align__core /* Early includes */ #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int 
__Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* 
__Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && 
(__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "core.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct 
__Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; struct __pyx_opt_args_15monotonic_align_4core_maximum_path_gradtts_c; /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { int __pyx_n; float max_neg_val; }; /* "monotonic_align/core.pyx":75 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_gradtts_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int b = values.shape[0] * */ struct __pyx_opt_args_15monotonic_align_4core_maximum_path_gradtts_c { int __pyx_n; float max_neg_val; }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct 
__pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} 
while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, 
f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T 
__Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto 
*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* GCCDiagnostics.proto */ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) #define __Pyx_HAS_GCC_DIAGNOSTIC #endif /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntFromPy.proto */ static CYTHON_INLINE int 
__Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'monotonic_align.core' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_each_gradtts(__Pyx_memviewslice, __Pyx_memviewslice, int, int, float); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_gradtts_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_gradtts_c *__pyx_optional_args); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static 
CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "monotonic_align.core" extern int __pyx_module_is_main_monotonic_align__core; int __pyx_module_is_main_monotonic_align__core = 0; /* Implementation of 'monotonic_align.core' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_t_xs[] = "t_xs"; static const char __pyx_k_t_ys[] = "t_ys"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_paths[] = "paths"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_values[] = "values"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_max_neg_val[] = "max_neg_val"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = 
"View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; 
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_max_neg_val; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_paths; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_t_xs; static PyObject *__pyx_n_s_t_ys; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_values; static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ static PyObject *__pyx_pf_15monotonic_align_4core_2maximum_path_gradtts_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_xs, 
__Pyx_memviewslice __pyx_v_t_ys, float __pyx_v_max_neg_val); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static float __pyx_k_; static float __pyx_k__2; static PyObject *__pyx_tuple__3; static 
PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__17; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_codeobj__27; /* Late includes */ /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { float __pyx_v_max_neg_val = __pyx_k_; int __pyx_v_x; int __pyx_v_y; float __pyx_v_v_prev; float __pyx_v_v_cur; int __pyx_v_index; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; long __pyx_t_4; int __pyx_t_5; long __pyx_t_6; long __pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; float __pyx_t_11; float __pyx_t_12; float __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; } } /* "monotonic_align/core.pyx":13 * cdef float v_cur * cdef float tmp * cdef int index = t_x - 1 # <<<<<<<<<<<<<< * * for y in range(t_y): */ __pyx_v_index = (__pyx_v_t_x - 1); /* "monotonic_align/core.pyx":15 * cdef int index = t_x - 1 * * for y in range(t_y): # <<<<<<<<<<<<<< * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: */ __pyx_t_1 = __pyx_v_t_y; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_y = __pyx_t_3; /* "monotonic_align/core.pyx":16 * * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< * if x == y: * v_cur = max_neg_val */ __pyx_t_4 = (__pyx_v_y + 1); __pyx_t_5 = __pyx_v_t_x; if (((__pyx_t_4 < __pyx_t_5) != 0)) { __pyx_t_6 = __pyx_t_4; } else { __pyx_t_6 = __pyx_t_5; } __pyx_t_4 = __pyx_t_6; __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); __pyx_t_6 = 0; if (((__pyx_t_5 > __pyx_t_6) != 0)) { __pyx_t_7 = __pyx_t_5; } else { __pyx_t_7 = __pyx_t_6; } __pyx_t_6 = __pyx_t_4; for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { __pyx_v_x = __pyx_t_5; /* "monotonic_align/core.pyx":17 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # <<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":18 * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: * v_cur = max_neg_val # <<<<<<<<<<<<<< * else: * v_cur = value[y-1, x] */ __pyx_v_v_cur = __pyx_v_max_neg_val; /* "monotonic_align/core.pyx":17 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # 
<<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ goto __pyx_L7; } /* "monotonic_align/core.pyx":20 * v_cur = max_neg_val * else: * v_cur = value[y-1, x] # <<<<<<<<<<<<<< * if x == 0: * if y == 0: */ /*else*/ { __pyx_t_9 = (__pyx_v_y - 1); __pyx_t_10 = __pyx_v_x; __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); } __pyx_L7:; /* "monotonic_align/core.pyx":21 * else: * v_cur = value[y-1, x] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ __pyx_t_8 = ((__pyx_v_x == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":22 * v_cur = value[y-1, x] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ __pyx_t_8 = ((__pyx_v_y == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":23 * if x == 0: * if y == 0: * v_prev = 0. # <<<<<<<<<<<<<< * else: * v_prev = max_neg_val */ __pyx_v_v_prev = 0.; /* "monotonic_align/core.pyx":22 * v_cur = value[y-1, x] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ goto __pyx_L9; } /* "monotonic_align/core.pyx":25 * v_prev = 0. * else: * v_prev = max_neg_val # <<<<<<<<<<<<<< * else: * v_prev = value[y-1, x-1] */ /*else*/ { __pyx_v_v_prev = __pyx_v_max_neg_val; } __pyx_L9:; /* "monotonic_align/core.pyx":21 * else: * v_cur = value[y-1, x] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ goto __pyx_L8; } /* "monotonic_align/core.pyx":27 * v_prev = max_neg_val * else: * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< * value[y, x] += max(v_prev, v_cur) * */ /*else*/ { __pyx_t_10 = (__pyx_v_y - 1); __pyx_t_9 = (__pyx_v_x - 1); __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); } __pyx_L8:; /* "monotonic_align/core.pyx":28 * else: * v_prev = value[y-1, x-1] * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< * * for y in range(t_y - 1, -1, -1): */ __pyx_t_11 = __pyx_v_v_cur; __pyx_t_12 = __pyx_v_v_prev; if (((__pyx_t_11 > __pyx_t_12) != 0)) { __pyx_t_13 = __pyx_t_11; } else { __pyx_t_13 = __pyx_t_12; } __pyx_t_9 = __pyx_v_y; __pyx_t_10 = __pyx_v_x; *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; } } /* "monotonic_align/core.pyx":30 * value[y, x] += max(v_prev, v_cur) * * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): */ for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_y = __pyx_t_1; /* "monotonic_align/core.pyx":31 * * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 # <<<<<<<<<<<<<< * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): * index = index - 1 */ __pyx_t_10 = __pyx_v_y; __pyx_t_9 = __pyx_v_index; *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; /* "monotonic_align/core.pyx":32 * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ __pyx_t_14 = ((__pyx_v_index != 0) != 0); if (__pyx_t_14) { } else { __pyx_t_8 = __pyx_t_14; goto __pyx_L13_bool_binop_done; } __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); if (!__pyx_t_14) { } else { __pyx_t_8 = __pyx_t_14; goto __pyx_L13_bool_binop_done; } __pyx_t_9 = (__pyx_v_y - 
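/* evaluate the third clause of the quoted condition: compare
   value[y-1, index] with value[y-1, index-1] to decide whether the
   backtrace steps diagonally (index decremented) */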
1); __pyx_t_10 = __pyx_v_index; __pyx_t_15 = (__pyx_v_y - 1); __pyx_t_16 = (__pyx_v_index - 1); __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); __pyx_t_8 = __pyx_t_14; __pyx_L13_bool_binop_done:; if (__pyx_t_8) { /* "monotonic_align/core.pyx":33 * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): * index = index - 1 # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_v_index = (__pyx_v_index - 1); /* "monotonic_align/core.pyx":32 * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ } } /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ /* function exit code */ } /* "monotonic_align/core.pyx":37 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each_gradtts(int[:,::1] path, float[:,::1] value, int t_x, int t_y, float max_neg_val) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ static void __pyx_f_15monotonic_align_4core_maximum_path_each_gradtts(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_x, int __pyx_v_t_y, float __pyx_v_max_neg_val) { int __pyx_v_x; int __pyx_v_y; float __pyx_v_v_prev; float __pyx_v_v_cur; int __pyx_v_index; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; long __pyx_t_4; int __pyx_t_5; long __pyx_t_6; long __pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; float __pyx_t_11; float __pyx_t_12; float __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; int __pyx_t_16; /* "monotonic_align/core.pyx":43 * cdef float v_cur * cdef float tmp * cdef int index = t_x - 1 # <<<<<<<<<<<<<< * * for y in range(t_y): */ __pyx_v_index = (__pyx_v_t_x - 1); /* "monotonic_align/core.pyx":45 * cdef int index = t_x - 1 * * for y in range(t_y): # <<<<<<<<<<<<<< * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: */ __pyx_t_1 = __pyx_v_t_y; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_y = __pyx_t_3; /* "monotonic_align/core.pyx":46 * * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< * if x == y: * v_cur = max_neg_val */ __pyx_t_4 = (__pyx_v_y + 1); __pyx_t_5 = __pyx_v_t_x; if (((__pyx_t_4 < __pyx_t_5) != 0)) { __pyx_t_6 = __pyx_t_4; } else { __pyx_t_6 = __pyx_t_5; } __pyx_t_4 = __pyx_t_6; __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); __pyx_t_6 = 0; if (((__pyx_t_5 > __pyx_t_6) != 0)) { __pyx_t_7 = __pyx_t_5; } else { __pyx_t_7 = __pyx_t_6; } __pyx_t_6 = __pyx_t_4; for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { __pyx_v_x = __pyx_t_5; /* "monotonic_align/core.pyx":47 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # <<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":48 * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: * v_cur = max_neg_val # <<<<<<<<<<<<<< * else: * v_cur = value[x, 
y-1] */ __pyx_v_v_cur = __pyx_v_max_neg_val; /* "monotonic_align/core.pyx":47 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # <<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ goto __pyx_L7; } /* "monotonic_align/core.pyx":50 * v_cur = max_neg_val * else: * v_cur = value[x, y-1] # <<<<<<<<<<<<<< * if x == 0: * if y == 0: */ /*else*/ { __pyx_t_9 = __pyx_v_x; __pyx_t_10 = (__pyx_v_y - 1); __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); } __pyx_L7:; /* "monotonic_align/core.pyx":51 * else: * v_cur = value[x, y-1] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ __pyx_t_8 = ((__pyx_v_x == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":52 * v_cur = value[x, y-1] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ __pyx_t_8 = ((__pyx_v_y == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":53 * if x == 0: * if y == 0: * v_prev = 0. # <<<<<<<<<<<<<< * else: * v_prev = max_neg_val */ __pyx_v_v_prev = 0.; /* "monotonic_align/core.pyx":52 * v_cur = value[x, y-1] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ goto __pyx_L9; } /* "monotonic_align/core.pyx":55 * v_prev = 0. * else: * v_prev = max_neg_val # <<<<<<<<<<<<<< * else: * v_prev = value[x-1, y-1] */ /*else*/ { __pyx_v_v_prev = __pyx_v_max_neg_val; } __pyx_L9:; /* "monotonic_align/core.pyx":51 * else: * v_cur = value[x, y-1] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ goto __pyx_L8; } /* "monotonic_align/core.pyx":57 * v_prev = max_neg_val * else: * v_prev = value[x-1, y-1] # <<<<<<<<<<<<<< * value[x, y] = max(v_cur, v_prev) + value[x, y] * */ /*else*/ { __pyx_t_10 = (__pyx_v_x - 1); __pyx_t_9 = (__pyx_v_y - 1); __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); } __pyx_L8:; /* "monotonic_align/core.pyx":58 * else: * v_prev = value[x-1, y-1] * value[x, y] = max(v_cur, v_prev) + value[x, y] # <<<<<<<<<<<<<< * * for y in range(t_y - 1, -1, -1): */ __pyx_t_11 = __pyx_v_v_prev; __pyx_t_12 = __pyx_v_v_cur; if (((__pyx_t_11 > __pyx_t_12) != 0)) { __pyx_t_13 = __pyx_t_11; } else { __pyx_t_13 = __pyx_t_12; } __pyx_t_9 = __pyx_v_x; __pyx_t_10 = __pyx_v_y; __pyx_t_14 = __pyx_v_x; __pyx_t_15 = __pyx_v_y; *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_14 * __pyx_v_value.strides[0]) )) + __pyx_t_15)) )) = (__pyx_t_13 + (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )))); } } /* "monotonic_align/core.pyx":60 * value[x, y] = max(v_cur, v_prev) + value[x, y] * * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< * path[index, y] = 1 * if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]): */ for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_y = __pyx_t_1; /* "monotonic_align/core.pyx":61 * * for y in range(t_y - 1, -1, -1): * path[index, y] = 1 # <<<<<<<<<<<<<< * if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]): * index = index - 1 */ __pyx_t_10 = __pyx_v_index; __pyx_t_9 = __pyx_v_y; *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; /* "monotonic_align/core.pyx":62 * for y in range(t_y - 1, -1, -1): * path[index, y] = 
1 * if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ __pyx_t_16 = ((__pyx_v_index != 0) != 0); if (__pyx_t_16) { } else { __pyx_t_8 = __pyx_t_16; goto __pyx_L13_bool_binop_done; } __pyx_t_16 = ((__pyx_v_index == __pyx_v_y) != 0); if (!__pyx_t_16) { } else { __pyx_t_8 = __pyx_t_16; goto __pyx_L13_bool_binop_done; } __pyx_t_9 = __pyx_v_index; __pyx_t_10 = (__pyx_v_y - 1); __pyx_t_15 = (__pyx_v_index - 1); __pyx_t_14 = (__pyx_v_y - 1); __pyx_t_16 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_14)) )))) != 0); __pyx_t_8 = __pyx_t_16; __pyx_L13_bool_binop_done:; if (__pyx_t_8) { /* "monotonic_align/core.pyx":63 * path[index, y] = 1 * if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]): * index = index - 1 # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_v_index = (__pyx_v_index - 1); /* "monotonic_align/core.pyx":62 * for y in range(t_y - 1, -1, -1): * path[index, y] = 1 * if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ } } /* "monotonic_align/core.pyx":37 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each_gradtts(int[:,::1] path, float[:,::1] value, int t_x, int t_y, float max_neg_val) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ /* function exit code */ } /* "monotonic_align/core.pyx":67 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< * cdef int b = paths.shape[0] * cdef int i */ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { CYTHON_UNUSED int __pyx_v_b; int __pyx_v_i; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; /* "monotonic_align/core.pyx":68 * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< * cdef int i * for i in prange(b, nogil=True): */ __pyx_v_b = (__pyx_v_paths.shape[0]); /* "monotonic_align/core.pyx":70 * cdef int b = paths.shape[0] * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) * */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_b; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) 
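/* Cython lowers `for i in prange(b, nogil=True)` to this OpenMP region:
 * the batch loop runs with the GIL released; __pyx_t_6/__pyx_t_7 (the
 * t_ys[i]/t_xs[i] index temporaries) are private, and the memoryview-slice
 * temporaries __pyx_t_4/__pyx_t_5 are firstprivate so every thread builds
 * its own paths[i]/values[i] sub-slice before calling maximum_path_each(). */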
#endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_2); /* "monotonic_align/core.pyx":71 * cdef int i * for i in prange(b, nogil=True): * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_t_4.data = __pyx_v_paths.data; __pyx_t_4.memview = __pyx_v_paths.memview; __PYX_INC_MEMVIEW(&__pyx_t_4, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; __pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; __pyx_t_4.suboffsets[0] = -1; __pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; __pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; __pyx_t_4.suboffsets[1] = -1; __pyx_t_5.data = __pyx_v_values.data; __pyx_t_5.memview = __pyx_v_values.memview; __PYX_INC_MEMVIEW(&__pyx_t_5, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_5.shape[0] = __pyx_v_values.shape[1]; __pyx_t_5.strides[0] = __pyx_v_values.strides[1]; __pyx_t_5.suboffsets[0] = -1; __pyx_t_5.shape[1] = __pyx_v_values.shape[2]; __pyx_t_5.strides[1] = __pyx_v_values.strides[2]; __pyx_t_5.suboffsets[1] = -1; __pyx_t_6 = __pyx_v_i; __pyx_t_7 = __pyx_v_i; __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "monotonic_align/core.pyx":70 * cdef int b = paths.shape[0] * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) * */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "monotonic_align/core.pyx":67 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< * cdef int b = paths.shape[0] * cdef int i */ /* function exit code */ } /* Python wrapper */ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); { static PyObject 
**__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 67, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 67, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 67, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 67, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 67, __pyx_L3_error) __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 67, __pyx_L3_error) __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 67, __pyx_L3_error) __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 67, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 67, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
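/* Python-visible entry: by this point the wrapper above has converted all
 * four arguments to C-contiguous, writable memoryview slices. Illustrative
 * call from Python (array names and dtypes are assumptions; `int`/`float`
 * memoryviews typically correspond to NumPy int32/float32):
 *
 *     from monotonic_align.core import maximum_path_c
 *     maximum_path_c(paths, values, t_ys, t_xs)
 *
 * with paths/values of shape (b, t_y, t_x) and t_ys/t_xs of shape (b,).
 * Both `paths` and `values` are modified in place: `values` accumulates the
 * DP scores and `paths` receives the 0/1 alignment mask. */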
__Pyx_RefNannySetupContext("maximum_path_c", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 67, __pyx_L1_error) } if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 67, __pyx_L1_error) } if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 67, __pyx_L1_error) } if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 67, __pyx_L1_error) } __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "monotonic_align/core.pyx":75 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_gradtts_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int b = values.shape[0] * */ static PyObject *__pyx_pw_15monotonic_align_4core_3maximum_path_gradtts_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_gradtts_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_xs, __Pyx_memviewslice __pyx_v_t_ys, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_gradtts_c *__pyx_optional_args) { float __pyx_v_max_neg_val = __pyx_k__2; CYTHON_UNUSED int __pyx_v_b; int __pyx_v_i; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; } } /* "monotonic_align/core.pyx":76 * @cython.wraparound(False) * cpdef void maximum_path_gradtts_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil: * cdef int b = values.shape[0] # <<<<<<<<<<<<<< * * cdef int i */ __pyx_v_b = (__pyx_v_values.shape[0]); /* "monotonic_align/core.pyx":79 * * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each_gradtts(paths[i], values[i], t_xs[i], t_ys[i], max_neg_val) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_b; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) 
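/* Same prange-to-OpenMP dispatch as maximum_path_c above, but forwarding
 * max_neg_val (default -1e9) through the optional-args struct. Per the
 * quoted .pyx, the gradtts variant's inner DP indexes value[x, y] and
 * path[index, y], i.e. with the two time axes swapped relative to
 * maximum_path_each, and it is called with (t_xs[i], t_ys[i]) in that order. */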
#endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_2); /* "monotonic_align/core.pyx":80 * cdef int i * for i in prange(b, nogil=True): * maximum_path_each_gradtts(paths[i], values[i], t_xs[i], t_ys[i], max_neg_val) # <<<<<<<<<<<<<< */ __pyx_t_4.data = __pyx_v_paths.data; __pyx_t_4.memview = __pyx_v_paths.memview; __PYX_INC_MEMVIEW(&__pyx_t_4, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; __pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; __pyx_t_4.suboffsets[0] = -1; __pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; __pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; __pyx_t_4.suboffsets[1] = -1; __pyx_t_5.data = __pyx_v_values.data; __pyx_t_5.memview = __pyx_v_values.memview; __PYX_INC_MEMVIEW(&__pyx_t_5, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_5.shape[0] = __pyx_v_values.shape[1]; __pyx_t_5.strides[0] = __pyx_v_values.strides[1]; __pyx_t_5.suboffsets[0] = -1; __pyx_t_5.shape[1] = __pyx_v_values.shape[2]; __pyx_t_5.strides[1] = __pyx_v_values.strides[2]; __pyx_t_5.suboffsets[1] = -1; __pyx_t_6 = __pyx_v_i; __pyx_t_7 = __pyx_v_i; __pyx_f_15monotonic_align_4core_maximum_path_each_gradtts(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_7)) ))), __pyx_v_max_neg_val); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "monotonic_align/core.pyx":79 * * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each_gradtts(paths[i], values[i], t_xs[i], t_ys[i], max_neg_val) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "monotonic_align/core.pyx":75 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_gradtts_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int b = values.shape[0] * */ /* function exit code */ } /* Python wrapper */ static PyObject *__pyx_pw_15monotonic_align_4core_3maximum_path_gradtts_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_15monotonic_align_4core_3maximum_path_gradtts_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; float __pyx_v_max_neg_val; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("maximum_path_gradtts_c (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_xs,&__pyx_n_s_t_ys,&__pyx_n_s_max_neg_val,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_gradtts_c", 0, 4, 5, 1); __PYX_ERR(0, 75, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_gradtts_c", 0, 4, 5, 2); __PYX_ERR(0, 75, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_gradtts_c", 0, 4, 5, 3); __PYX_ERR(0, 75, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_neg_val); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_gradtts_c") < 0)) __PYX_ERR(0, 75, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 75, __pyx_L3_error) __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 75, __pyx_L3_error) __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 75, __pyx_L3_error) __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 75, __pyx_L3_error) if (values[4]) { __pyx_v_max_neg_val = __pyx_PyFloat_AsFloat(values[4]); if (unlikely((__pyx_v_max_neg_val == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) } else { __pyx_v_max_neg_val = __pyx_k__2; } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("maximum_path_gradtts_c", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 75, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("monotonic_align.core.maximum_path_gradtts_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_15monotonic_align_4core_2maximum_path_gradtts_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_xs, __pyx_v_t_ys, __pyx_v_max_neg_val); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15monotonic_align_4core_2maximum_path_gradtts_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_xs, __Pyx_memviewslice __pyx_v_t_ys, float __pyx_v_max_neg_val) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations struct __pyx_opt_args_15monotonic_align_4core_maximum_path_gradtts_c __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("maximum_path_gradtts_c", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 75, __pyx_L1_error) } if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 75, __pyx_L1_error) } if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 75, __pyx_L1_error) } if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 75, __pyx_L1_error) } __pyx_t_1.__pyx_n = 1; __pyx_t_1.max_neg_val = __pyx_v_max_neg_val; __pyx_f_15monotonic_align_4core_maximum_path_gradtts_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_xs, __pyx_v_t_ys, 0, &__pyx_t_1); __pyx_t_2 = __Pyx_void_to_None(NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("monotonic_align.core.maximum_path_gradtts_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: 
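/* The CYTHON_FALLTHROUGH cascade above copies however many positional
 * arguments were supplied into values[0..4]; the keyword dict is then
 * consulted below to fill in whatever is still missing (shape, itemsize
 * and format are required, mode and allocate_buffer are optional). */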
break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct 
__pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = 
__Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to 
allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef 
bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, 
Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; 
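/* The assignments here fill the PEP 3118 Py_buffer structure from the
 * array's own fields: buf/len/ndim/shape/strides are copied directly,
 * suboffsets stays NULL because the data is one contiguous block, and
 * format is exported below only when the consumer requested PyBUF_FORMAT.
 * Illustrative consumer (plain Python; "arr" stands for any such array):
 *
 *   view = memoryview(arr)   # triggers __getbuffer__
 *   view.itemsize, view.ndim, view.format
 */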
__pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif 
self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ 
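/* The memview property goes through the vtable slot for the cdef method
 * get_memview() above, so every attribute access builds a fresh
 * memoryview object wrapping this array. */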
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { 
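  /* len(array) reports only the first dimension (self._shape[0]), matching
   * Python sequence semantics for a multi-dimensional container. */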
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__", 0);

  /* "View.MemoryView":231
 *
 * def __len__(self):
 * return self._shape[0]             # <<<<<<<<<<<<<<
 *
 * def __getattr__(self, attr):
 */
  __pyx_r = (__pyx_v_self->_shape[0]);
  goto __pyx_L0;

  /* "View.MemoryView":230
 * return memoryview(self, flags, self.dtype_is_object)
 *
 * def __len__(self):             # <<<<<<<<<<<<<<
 * return self._shape[0]
 *
 */

  /* function exit code */
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":233
 * return self._shape[0]
 *
 * def __getattr__(self, attr):             # <<<<<<<<<<<<<<
 * return getattr(self.memview, attr)
 *
 */

/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename =
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
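/* Pickling is deliberately unsupported: array has a non-trivial __cinit__
 * that owns a malloc'd buffer, so __reduce_cython__ (and __setstate_cython__
 * below) raise TypeError instead of trying to serialize that state. */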
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = 
__pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ 
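/* Note on the branch below (explanatory comment, not generated code): it
 * builds the 3-tuple
 *     (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state)
 * __pyx_int_184977713 is the module's cached Python int for 0xb068931
 * (184977713 decimal), a checksum of the Enum type's pickled layout; the
 * unpickler compares it against its own value so that pickles from a
 * differently-shaped build of this type are rejected instead of being
 * misinterpreted. */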
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ 
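/* A minimal sketch of the pickle round trip implemented by
 * __reduce_cython__ above and __setstate_cython__ below (assuming the
 * standard Cython pickling protocol; `e` is a hypothetical Enum instance):
 *
 *     reconstructor, args, state = e.__reduce_cython__()
 *     obj = __pyx_unpickle_Enum(Enum, 0xb068931, None)   # checksum verified
 *     obj.__setstate_cython__(state)                     # state == (name,) or (name, __dict__)
 *
 * __setstate_cython__ only type-checks __pyx_state as a tuple and forwards
 * it to __pyx_unpickle_Enum__set_state(). */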
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":17
 *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
 * def __setstate_cython__(self, __pyx_state):
 *     __pyx_unpickle_Enum__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
  __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* "(tree fragment)":16
 *     else:
 *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_unpickle_Enum__set_state(self, __pyx_state)
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":298
 *
 * @cname('__pyx_align_pointer')
 * cdef void *align_pointer(void *memory, size_t alignment) nogil:             # <<<<<<<<<<<<<<
 *     "Align pointer memory on a given boundary"
 *     cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
 */

static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
  Py_intptr_t __pyx_v_aligned_p;
  size_t __pyx_v_offset;
  void *__pyx_r;
  int __pyx_t_1;

  /* "View.MemoryView":300
 * cdef void *align_pointer(void *memory, size_t alignment) nogil:
 *     "Align pointer memory on a given boundary"
 *     cdef Py_intptr_t aligned_p = <Py_intptr_t> memory             # <<<<<<<<<<<<<<
 *     cdef size_t offset
 *
 */
  __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);

  /* "View.MemoryView":304
 *
 *     with cython.cdivision(True):
 *         offset = aligned_p % alignment             # <<<<<<<<<<<<<<
 *
 *     if offset > 0:
 */
  __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);

  /* "View.MemoryView":306
 *         offset = aligned_p % alignment
 *
 *     if offset > 0:             # <<<<<<<<<<<<<<
 *         aligned_p += alignment - offset
 *
 */
  __pyx_t_1 = ((__pyx_v_offset > 0) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":307
 *
 *     if offset > 0:
 *         aligned_p += alignment - offset             # <<<<<<<<<<<<<<
 *
 *     return <void *> aligned_p
 */
    __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));

    /* "View.MemoryView":306
 *         offset = aligned_p % alignment
 *
 *     if offset > 0:             # <<<<<<<<<<<<<<
 *         aligned_p += alignment - offset
 *
 */
  }

  /* "View.MemoryView":309
 *         aligned_p += alignment - offset
 *
 *     return <void *> aligned_p             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_r =
((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, 
__pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < 
THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & 
PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer 
*)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; 
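/* The assignment just above and the one just below together perform an
 * O(1) "swap with last" removal from the preallocated lock pool: the lock
 * being released is exchanged with the last in-use slot, so the prefix
 * __pyx_memoryview_thread_locks[0 .. used-1] stays densely packed.  A
 * standalone sketch of the same idiom (hypothetical helper, not part of
 * this file):
 *
 *     static void pool_release(PyThread_type_lock *pool, int *used, int i)
 *     {
 *         (*used)--;                        // shrink the live prefix
 *         if (i != *used) {                 // swap victim with last live slot
 *             PyThread_type_lock t = pool[*used];
 *             pool[*used] = pool[i];
 *             pool[i] = t;
 *         }
 *     }
 */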
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) 
break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject 
*__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* 
"View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = 
_unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) 
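/* This branch implements scalar broadcast for the Cython-level statement
 * `mv[index] = value` when `index` contains slices but `value` is not
 * buffer-like (is_slice() returned None above): self[index] is
 * materialised as a sub-memoryview, type-checked just above, and
 * setitem_slice_assign_scalar() below stores `value` into every element
 * of that slice.  Slice-to-slice assignment instead takes the
 * setitem_slice_assignment() path, which copies element-wise via
 * memoryview_copy_contents(). */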
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & 
~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto 
__pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, 
dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * 
raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; 
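/* Exception-exit path of the `finally:` clause: the code from here saves the
 * in-flight exception (__Pyx_ExceptionSwap/__Pyx_GetException on Python 3,
 * __Pyx_ErrFetch on Python 2) together with the current line/file position,
 * runs the finally body (PyMem_Free(tmp)), then restores the exception and
 * jumps to __pyx_L1_error so the original failure still propagates. */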
__pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
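/* Standard Cython error epilogue: any live temporaries have been released
 * above; __Pyx_AddTraceback records the failing source position, and the
 * function returns 0 (NULL) so the pending exception propagates. */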
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else 
#endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) 
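/* `except struct.error` handler: the match was performed dynamically against
 * the struct module's `error` attribute via __Pyx_PyErr_GivenExceptionMatches,
 * and __Pyx_GetException has taken ownership of the (type, value, traceback)
 * triple; the handler below re-raises the failure as ValueError. */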
__Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, 
tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } 
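/* Generic-call fallback for struct.pack(self.view.format, value): when
 * neither CYTHON_FAST_PYCALL nor CYTHON_FAST_PYCCALL applies, the arguments
 * (plus the bound-method `self`, if present) are packed into a fresh tuple
 * and dispatched through __Pyx_PyObject_Call. */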
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * 
* if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable 
memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static 
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
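/* shape.__get__ exit: on success __pyx_r holds the tuple built from the C
 * array self.view.shape[:self.view.ndim]; on error it is NULL with the
 * traceback already recorded above. __Pyx_XGIVEREF hands the reference back
 * to the caller. */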
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__14, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def 
nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def 
size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def 
__repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; 
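/* Editor's note (illustrative, not generated code): is_c_contig() takes a
   __Pyx_memviewslice snapshot of this view and tests C-order (row-major)
   contiguity. A hedged sketch of the analogous check on a raw Py_buffer
   uses the CPython API:
       int c_contig = PyBuffer_IsContiguous(&self->view, 'C');
   The generated code calls __pyx_memviewslice_is_contig instead so that
   sliced views, which carry their own shape/stride arrays, are handled
   uniformly. */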
__Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
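/* Editor's note: 'F' selects Fortran (column-major) order, where strides
   grow from the first dimension to the last; the 'C' test in is_c_contig()
   above is the row-major mirror image. */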
__pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * 
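 * (editor's note, illustrative: copy() above and copy_fortran() below differ
 * only in which contiguity flag is masked out and which is forced on:
 *     copy():         flags = (self.flags & ~PyBUF_F_CONTIGUOUS) | PyBUF_C_CONTIGUOUS
 *     copy_fortran(): flags = (self.flags & ~PyBUF_C_CONTIGUOUS) | PyBUF_F_CONTIGUOUS
 * both then delegate to slice_copy_contig and memoryview_copy_from_slice)
 *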
return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
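/* Editor's note: memoryview objects are deliberately unpicklable; both
   __reduce_cython__ and __setstate_cython__ below raise
   TypeError("no default __reduce__ due to non-trivial __cinit__"). */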
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * 
@cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
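 * (editor's note, worked example: for ndim == 3, an index of (..., 0)
 * expands to (slice(None), slice(None), 0) and the function returns
 * (True, (slice(None), slice(None), 0)); the first element is truthy
 * whenever at least one slice or padding dimension is present)
 *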
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__17); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__17); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__17); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef 
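 * (editor's note: the check quoted here rejects PIL-style indirect
 * buffers; a suboffset >= 0 marks a dimension that must be dereferenced
 * through a pointer table, which these memoryview slices do not support,
 * hence the ValueError)
 *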
assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if 
(unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) 
#else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = 
(__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * 
have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* 
"View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: 
* */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: 
* if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); 
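/* Descriptive note on the extent computation that follows: with start and
 * stop normalized above and step defaulting to 1 in the branch below, the
 * cdivision(True) block computes new_shape = (stop - start) / step using C
 * truncating division, bumps it by one when a nonzero remainder is left
 * (yielding the ceiling), and finally clamps negative extents to 0 so an
 * empty slice produces shape 0 rather than a negative length. */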
if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] 
+ suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except 
NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = 
__Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
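/* Error-exit path for the `except NULL` contract of pybuffer_index:
 * temporaries are released, a traceback frame is appended for the
 * Cython-level function, and NULL is returned so the caller sees the
 * raised IndexError. */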
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with 
indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) 
*/ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * 
self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ 
due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* 
"View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t 
*)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * 
__Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; 
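/* slice_copy mirrors the Py_buffer metadata (shape, strides, suboffsets)
 * from the owning memoryview into the flat __Pyx_memviewslice; when the
 * buffer exposes no suboffsets array, each dimension is normalized to -1
 * below, marking every axis as direct (non-indirect). */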
__pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, 
&memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return 
memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
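 *
 * Editor's note (annotation, not part of the quoted Cython source): the
 * heuristic below takes c_stride from the innermost dimension of extent > 1
 * and f_stride from the outermost such dimension, then prefers 'C' whenever
 * abs(c_stride) <= abs(f_stride).  For example, shape {3, 4} with strides
 * {32, 8} gives c_stride = 8 and f_stride = 32, hence 'C'; the
 * Fortran-ordered strides {8, 24} give c_stride = 24 and f_stride = 8,
 * hence 'F'.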
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given 
slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i 
in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t 
itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = 
stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* 
"View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; 
goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
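 *
 * Editor's note (annotation, not part of the quoted docstring): the body
 * below first equalizes the ranks, so copying a src of shape {4} into a dst
 * of shape {3, 4} turns src into shape {1, 4} and then zeroes
 * src.strides[0], making the single source row re-read for every dst row.
 * Overlapping operands are routed through a temporary buffer, a single
 * memcpy() fast path handles the case where both slices are contiguous in
 * the same order, and everything else falls back to the recursive strided
 * copy (transposing first when both sides prefer Fortran order, so the
 * innermost loop runs over the smallest strides).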
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
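/*
 * The wrapper body below is Cython's generated calling convention for a
 * def function: it unpacks (__pyx_type, __pyx_checksum, __pyx_state),
 * accepted positionally or as keywords, routes arity and keyword errors
 * through __Pyx_RaiseArgtupleInvalid / __Pyx_ParseOptionalKeywords, and
 * then forwards to the __pyx_pf_ implementation function.
 */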
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if 
__pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 
0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_array___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { 
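/*
 * Method table for the generated array type: __getattr__ is exposed with
 * METH_O|METH_COEXIST so attribute lookup can fall back to it, and the
 * __reduce_cython__/__setstate_cython__ entries wire up the generated
 * pickling stubs, which raise TypeError for this type because its
 * __cinit__ is non-trivial (see the "no default __reduce__" tuples
 * packed in __Pyx_InitCachedConstants below).
 */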
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && 
!_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif 
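/*
 * Deallocator for the memoryview type: after the optional tp_finalize
 * hook, the object is untracked from the cycle GC, any pending exception
 * is saved with PyErr_Fetch, and the user-visible __dealloc__ runs with
 * the reference count temporarily bumped so code executing during
 * teardown cannot re-enter this deallocator; the exception state is then
 * restored and the contained references cleared.
 */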
PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryview___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", 
(PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ 
#endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryviewslice___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ 
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, {"maximum_path_gradtts_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_3maximum_path_gradtts_c, METH_VARARGS|METH_KEYWORDS, 0}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_core}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "core", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, 
__pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_max_neg_val, __pyx_k_max_neg_val, 
sizeof(__pyx_k_max_neg_val), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, 
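/*
 * (string table, continued) Per Cython's __Pyx_StringTabEntry layout,
 * each row appears to supply the target slot, the C literal and its
 * size, an encoding pointer, and is_unicode/is_str/intern flags; module
 * init walks this table once to create and, where flagged, intern the
 * corresponding Python string objects.
 */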
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise 
TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__14 = PyTuple_New(1); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__14, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__14); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") 
* def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 291, __pyx_L1_error) 
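/*
 * Note on the pattern above: each literal argument tuple (for the ValueError /
 * TypeError raises and the Enum() calls) is packed exactly once here and kept
 * in a module-level __pyx_tuple__N slot, so later raises reuse the cached
 * object instead of rebuilding it on every call. A minimal sketch of that
 * caching idea follows; it is illustration only (never compiled, guarded by
 * #if 0) and uses hypothetical demo_* names that are not part of this
 * generated module.
 */
#if 0
static PyObject *demo_tuple = NULL;           /* module-lifetime cache slot */
static int demo_init_constants(void)
{
    PyObject *msg = PyUnicode_FromString("demo message");
    if (!msg)
        return -1;
    demo_tuple = PyTuple_Pack(1, msg);        /* new reference, kept cached */
    Py_DECREF(msg);                           /* the tuple holds its own ref */
    return demo_tuple ? 0 : -1;
}
#endif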
__Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { 
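/*
 * What follows wires up each extension type: the C-level method table
 * (vtable) is filled with function pointers, PyType_Ready() finalizes the
 * type, and __Pyx_SetVtable() stashes the table in tp_dict so cdef-method
 * calls resolve through a direct function pointer instead of a Python
 * attribute lookup. A minimal sketch of the same wiring, illustration only
 * (never compiled, guarded by #if 0), with hypothetical demo_* names that do
 * not exist in this module:
 */
#if 0
typedef struct {
    int (*get_value)(void *self);             /* C-level "cdef method" slot */
} demo_vtable_t;
static int demo_get_value(void *self) { (void)self; return 42; }
static demo_vtable_t demo_vtable;
static int demo_init_type(PyTypeObject *tp)
{
    demo_vtable.get_value = demo_get_value;   /* fill slots before readying */
    if (PyType_Ready(tp) < 0)                 /* finalize bases and slots */
        return -1;
    /* store the table under "__pyx_vtable__" in the type dict, as
       __Pyx_SetVtable() does for the real types initialized below */
    return __Pyx_SetVtable(tp->tp_dict, &demo_vtable);
}
#endif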
__Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct 
__pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initcore(void) #else __Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_core(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS PyEval_InitThreads(); #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_monotonic_align__core) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "monotonic_align.core")) { if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ __pyx_k_ = (-1e9); /* "monotonic_align/core.pyx":75 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_gradtts_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int b = values.shape[0] * */ __pyx_k__2 = (-1e9); __pyx_k__2 = (-1e9); /* "monotonic_align/core.pyx":1 * cimport cython # <<<<<<<<<<<<<< * from cython.parallel import prange * */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* 
"View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * 
__pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = Py_TYPE(func)->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, 
"instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must 
not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if 
CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (__Pyx_PyFastCFunction_Check(func)) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } 
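/* A NULL result with no exception set means the attribute was simply absent; any error other than AttributeError is left pending for the caller. */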
return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject 
**cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* 
__Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = 
slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\
        "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ?
16 : 8); } case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably be the same as above, but we don't have any guarantees.
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number, ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ndim = ctx->head->field->type->ndim; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
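/* The format string ended while struct fields were still unconsumed: the dtype expects more members than the buffer described. */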
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return 
!a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i > -1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if
(unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->len > 0) { for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; } if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ 
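/*
 * The macros below implement overflow-checked narrowing: the value is
 * computed in a wide type and accepted only if casting down to the target
 * type and back reproduces it.  A minimal standalone sketch of that
 * round-trip test follows; the name demo_long_to_int is hypothetical and
 * not part of the generated module.
 */
static CYTHON_INLINE int demo_long_to_int(long value, int *out)
{
    /* hypothetical demo helper, not generated by Cython */
    /* Narrowing is lossless iff the down-and-up cast round-trips. */
    if (sizeof(int) < sizeof(long)) {
        if (value != (long) (int) value)
            return -1; /* would overflow; caller should raise OverflowError */
    }
    *out = (int) value;
    return 0;
}
/*
 * The real macros additionally distinguish negative overflow for unsigned
 * targets (raise_neg_overflow) and, when the `exc` flag is set, let a
 * return value of -1 pass through untouched while a Python exception is
 * already pending.
 */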
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int 
one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | 
(int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = 
PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, 
PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const char neg_one = (char) -1, const_zero = (char) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = 
PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, 
PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; 
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
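/* Usage note (illustrative sketch, not emitted by Cython): the CIntFromPy/
 * CIntToPy helpers above signal failure by returning (target_type)-1, so a
 * caller must distinguish a genuine -1 result from an error by checking
 * PyErr_Occurred(). The function below is a hypothetical example of that
 * pattern, built only from helpers defined in this file. */
static CYTHON_INLINE int __pyx_example_roundtrip_int(PyObject *obj, PyObject **out)
{
    /* Convert a Python integer to a C int; may raise OverflowError/TypeError. */
    int val = __Pyx_PyInt_As_int(obj);
    if (val == -1 && PyErr_Occurred())
        return -1;  /* conversion failed; the exception is already set */
    /* Convert back to a Python int; returns a new reference or NULL. */
    *out = __Pyx_PyInt_From_int(val);
    return (*out != NULL) ? 0 : -1;
}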
dsdd.c
/*! @copyright (c) 2017 King Abdullah University of Science and * Technology (KAUST). All rights reserved. * * STARS-H is a software package, provided by King Abdullah * University of Science and Technology (KAUST) * * @file src/backends/mpi/blrm/dsdd.c * @version 1.3.0 * @author Aleksandr Mikhalev * @date 2017-11-07 * */ #include "common.h" #include "starsh.h" #include "starsh-mpi.h" int starsh_blrm__dsdd_mpi(STARSH_blrm **matrix, STARSH_blrf *format, int maxrank, double tol, int onfly) //! Approximate each tile by divide-and-conquer SVD (GESDD function). /*! * @param[out] matrix: Address of pointer to @ref STARSH_blrm object. * @param[in] format: Block low-rank format. * @param[in] maxrank: Maximum possible rank. * @param[in] tol: Relative error tolerance. * @param[in] onfly: Whether to skip storing dense blocks (compute them on the fly). * @return Error code @ref STARSH_ERRNO. * @ingroup blrm * */ { STARSH_blrf *F = format; STARSH_problem *P = F->problem; STARSH_kernel *kernel = P->kernel; STARSH_int nblocks_far = F->nblocks_far; STARSH_int nblocks_near = F->nblocks_near; STARSH_int nblocks_far_local = F->nblocks_far_local; STARSH_int nblocks_near_local = F->nblocks_near_local; // Shortcuts to information about clusters STARSH_cluster *RC = F->row_cluster; STARSH_cluster *CC = F->col_cluster; void *RD = RC->data, *CD = CC->data; // Following values default to given block low-rank format F, but they are // changed when there are false far-field blocks. STARSH_int new_nblocks_far = F->nblocks_far; STARSH_int new_nblocks_near = F->nblocks_near; STARSH_int new_nblocks_far_local = F->nblocks_far_local; STARSH_int new_nblocks_near_local = F->nblocks_near_local; STARSH_int *block_far = F->block_far; STARSH_int *block_near = F->block_near; STARSH_int *block_far_local = F->block_far_local; STARSH_int *block_near_local = F->block_near_local; // Places to store low-rank factors, dense blocks and ranks Array **far_U = NULL, **far_V = NULL, **near_D = NULL; int *far_rank = NULL; double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL; size_t offset_U = 0, offset_V = 0, offset_D = 0; STARSH_int lbi, lbj, bi, bj = 0; double drsdd_time = 0, kernel_time = 0; // Init buffers to store low-rank factors of far-field blocks if needed if(nblocks_far > 0) { STARSH_MALLOC(far_U, nblocks_far_local); STARSH_MALLOC(far_V, nblocks_far_local); STARSH_MALLOC(far_rank, nblocks_far_local); size_t size_U = 0, size_V = 0; // Simple cycle over all far-field blocks for(lbi = 0; lbi < nblocks_far_local; lbi++) { STARSH_int bi = block_far_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_far[2*bi]; STARSH_int j = block_far[2*bi+1]; // Get corresponding sizes and minimum of them size_U += RC->size[i]; size_V += CC->size[j]; } size_U *= maxrank; size_V *= maxrank; STARSH_MALLOC(alloc_U, size_U); STARSH_MALLOC(alloc_V, size_V); for(lbi = 0; lbi < nblocks_far_local; lbi++) { STARSH_int bi = block_far_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_far[2*bi]; STARSH_int j = block_far[2*bi+1]; // Get corresponding sizes and minimum of them size_t nrows = RC->size[i], ncols = CC->size[j]; int shape_U[] = {nrows, maxrank}; int shape_V[] = {ncols, maxrank}; double *U = alloc_U+offset_U, *V = alloc_V+offset_V; offset_U += nrows*maxrank; offset_V += ncols*maxrank; array_from_buffer(far_U+lbi, 2, shape_U, 'd', 'F', U); array_from_buffer(far_V+lbi, 2, shape_V, 'd', 'F', V); } offset_U = 0; offset_V = 0; } // Work variables int info; // Simple cycle over all far-field admissible
blocks #pragma omp parallel for schedule(dynamic, 1) for(lbi = 0; lbi < nblocks_far_local; lbi++) { STARSH_int bi = block_far_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_far[2*bi]; STARSH_int j = block_far[2*bi+1]; // Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int mn = nrows < ncols ? nrows : ncols; // Get size of temporary arrays int lmn = mn, lwork = (4*lmn+8+nrows+ncols)*lmn, liwork = 8*lmn; double *D, *work; int *iwork; int info; // Allocate temporary arrays STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info); STARSH_PMALLOC(iwork, liwork, info); STARSH_PMALLOC(work, lwork, info); // Compute elements of a block #ifdef OPENMP double time0 = omp_get_wtime(); #endif kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j], RD, CD, D, nrows); #ifdef OPENMP double time1 = omp_get_wtime(); #endif starsh_dense_dlrsdd(nrows, ncols, D, nrows, far_U[lbi]->data, nrows, far_V[lbi]->data, ncols, far_rank+lbi, maxrank, tol, work, lwork, iwork); #ifdef OPENMP double time2 = omp_get_wtime(); #pragma omp critical { drsdd_time += time2-time1; kernel_time += time1-time0; } #endif // Free temporary arrays free(D); free(work); free(iwork); } // Get number of false far-field blocks STARSH_int nblocks_false_far_local = 0; STARSH_int *false_far_local = NULL; for(lbi = 0; lbi < nblocks_far_local; lbi++) if(far_rank[lbi] == -1) nblocks_false_far_local++; if(nblocks_false_far_local > 0) { // IMPORTANT: `false_far` and `false_far_local` must be in // ascending order for later code to work normally STARSH_MALLOC(false_far_local, nblocks_false_far_local); lbj = 0; for(lbi = 0; lbi < nblocks_far_local; lbi++) if(far_rank[lbi] == -1) false_far_local[lbj++] = block_far_local[lbi]; } // Sync list of all false far-field blocks STARSH_int nblocks_false_far = 0; int int_nblocks_false_far_local = nblocks_false_far_local; int *mpi_recvcount, *mpi_offset; int mpi_size, mpi_rank; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); STARSH_MALLOC(mpi_recvcount, mpi_size); STARSH_MALLOC(mpi_offset, mpi_size); MPI_Allgather(&int_nblocks_false_far_local, 1, MPI_INT, mpi_recvcount, 1, MPI_INT, MPI_COMM_WORLD); for(bi = 0; bi < mpi_size; bi++) nblocks_false_far += mpi_recvcount[bi]; mpi_offset[0] = 0; for(bi = 1; bi < mpi_size; bi++) mpi_offset[bi] = mpi_offset[bi-1]+mpi_recvcount[bi-1]; STARSH_int *false_far = NULL; if(nblocks_false_far > 0) STARSH_MALLOC(false_far, nblocks_false_far); MPI_Allgatherv(false_far_local, nblocks_false_far_local, my_MPI_SIZE_T, false_far, mpi_recvcount, mpi_offset, my_MPI_SIZE_T, MPI_COMM_WORLD); free(mpi_recvcount); free(mpi_offset); // Make false_far be in ascending order qsort(false_far, nblocks_false_far, sizeof(*false_far), cmp_size_t); if(nblocks_false_far > 0) { // Update list of near-field blocks new_nblocks_near = nblocks_near+nblocks_false_far; new_nblocks_near_local = nblocks_near_local+nblocks_false_far_local; STARSH_MALLOC(block_near, 2*new_nblocks_near); if(new_nblocks_near_local > 0) STARSH_MALLOC(block_near_local, new_nblocks_near_local); // At first get all near-field blocks, assumed to be dense #pragma omp parallel for schedule(static) for(bi = 0; bi < 2*nblocks_near; bi++) block_near[bi] = F->block_near[bi]; #pragma omp parallel for schedule(static) for(lbi = 0; lbi < nblocks_near_local; lbi++) block_near_local[lbi] = F->block_near_local[lbi]; // Add false far-field blocks #pragma omp parallel for schedule(static) for(bi = 0; bi < 
nblocks_false_far; bi++) { STARSH_int bj = false_far[bi]; block_near[2*(bi+nblocks_near)] = F->block_far[2*bj]; block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1]; } bi = 0; for(lbi = 0; lbi < nblocks_false_far_local; lbi++) { lbj = false_far_local[lbi]; while(bi < nblocks_false_far && false_far[bi] < lbj) bi++; block_near_local[nblocks_near_local+lbi] = nblocks_near+bi; } // Update list of far-field blocks new_nblocks_far = nblocks_far-nblocks_false_far; new_nblocks_far_local = nblocks_far_local-nblocks_false_far_local; if(new_nblocks_far > 0) { STARSH_MALLOC(block_far, 2*new_nblocks_far); if(new_nblocks_far_local > 0) STARSH_MALLOC(block_far_local, new_nblocks_far_local); bj = 0; lbi = 0; lbj = 0; for(bi = 0; bi < nblocks_far; bi++) { // `false_far` must be in ascending order for this to work if(bj < nblocks_false_far && false_far[bj] == bi) { if(nblocks_false_far_local > lbj && false_far_local[lbj] == bi) { lbi++; lbj++; } bj++; } else { block_far[2*(bi-bj)] = F->block_far[2*bi]; block_far[2*(bi-bj)+1] = F->block_far[2*bi+1]; if(nblocks_far_local > lbi && F->block_far_local[lbi] == bi) { block_far_local[lbi-lbj] = bi-bj; lbi++; } } } } // Update format by creating new format STARSH_blrf *F2; info = starsh_blrf_new_from_coo_mpi(&F2, P, F->symm, RC, CC, new_nblocks_far, block_far, new_nblocks_far_local, block_far_local, new_nblocks_near, block_near, new_nblocks_near_local, block_near_local, F->type); // Swap internal data of formats and free unnecessary data STARSH_blrf tmp_blrf = *F; *F = *F2; *F2 = tmp_blrf; if(mpi_rank == 0) STARSH_WARNING("`F` was modified due to false far-field blocks"); starsh_blrf_free(F2); } // Compute near-field blocks if needed if(onfly == 0 && new_nblocks_near > 0) { STARSH_MALLOC(near_D, new_nblocks_near_local); size_t size_D = 0; // Simple cycle over all near-field blocks for(lbi = 0; lbi < new_nblocks_near_local; lbi++) { STARSH_int bi = block_near_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_near[2*bi]; STARSH_int j = block_near[2*bi+1]; // Get corresponding sizes and minimum of them size_t nrows = RC->size[i]; size_t ncols = CC->size[j]; // Update size_D size_D += nrows*ncols; } STARSH_MALLOC(alloc_D, size_D); // For each near-field block compute its elements #pragma omp parallel for schedule(dynamic, 1) for(lbi = 0; lbi < new_nblocks_near_local; lbi++) { STARSH_int bi = block_near_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_near[2*bi]; STARSH_int j = block_near[2*bi+1]; // Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int shape[2] = {nrows, ncols}; double *D; #pragma omp critical { D = alloc_D+offset_D; offset_D += nrows*ncols; //array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D); //offset_D += near_D[lbi]->size; } array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D); #ifdef OPENMP double time0 = omp_get_wtime(); #endif kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j], RD, CD, D, nrows); #ifdef OPENMP double time1 = omp_get_wtime(); #pragma omp critical kernel_time += time1-time0; #endif } } // Change sizes of far_rank, far_U and far_V if there were false // far-field blocks lbj = 0; for(lbi = 0; lbi < nblocks_far_local; lbi++) { if(far_rank[lbi] == -1) lbj++; else { int shape_U[2] = {far_U[lbi]->shape[0], far_rank[lbi]}; int shape_V[2] = {far_V[lbi]->shape[0], far_rank[lbi]}; array_from_buffer(far_U+lbi-lbj, 2, shape_U, 'd', 'F', far_U[lbi]->data); array_from_buffer(far_V+lbi-lbj, 2, 
shape_V, 'd', 'F', far_V[lbi]->data); far_rank[lbi-lbj] = far_rank[lbi]; } } if(nblocks_false_far_local > 0 && new_nblocks_far_local > 0) { STARSH_REALLOC(far_rank, new_nblocks_far_local); STARSH_REALLOC(far_U, new_nblocks_far_local); STARSH_REALLOC(far_V, new_nblocks_far_local); } // If all far-field blocks are false, then dealloc buffers if(new_nblocks_far_local == 0 && nblocks_far_local > 0) { block_far = NULL; free(far_rank); far_rank = NULL; free(far_U); far_U = NULL; free(far_V); far_V = NULL; free(alloc_U); alloc_U = NULL; free(alloc_V); alloc_V = NULL; } // Dealloc list of false far-field blocks if it is not empty if(nblocks_false_far > 0) free(false_far); if(nblocks_false_far_local > 0) free(false_far_local); // Finish with creating instance of Block Low-Rank Matrix with given // buffers #ifdef OPENMP double mpi_drsdd_time = 0, mpi_kernel_time = 0; MPI_Reduce(&drsdd_time, &mpi_drsdd_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); MPI_Reduce(&kernel_time, &mpi_kernel_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); if(mpi_rank == 0) { //STARSH_WARNING("DRSDD kernel total time: %e secs", mpi_drsdd_time); //STARSH_WARNING("MATRIX kernel total time: %e secs", mpi_kernel_time); } #endif return starsh_blrm_new_mpi(matrix, F, far_rank, far_U, far_V, onfly, near_D, alloc_U, alloc_V, alloc_D, '1'); }
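/* Illustrative sketch (not part of STARS-H): a minimal caller of
 * starsh_blrm__dsdd_mpi(). It assumes MPI has been initialized and that a
 * block low-rank format `F` was already built, e.g. via
 * starsh_blrf_new_from_coo_mpi() as used above. The helper name and the
 * maxrank/tol values are assumptions for the example only. */
static int example_dsdd_approximate(STARSH_blrf *F)
{
    STARSH_blrm *M = NULL;
    int maxrank = 30;   /* upper bound on the rank of each far-field tile */
    double tol = 1e-9;  /* relative tolerance for the GESDD truncation */
    int onfly = 0;      /* store dense near-field blocks explicitly */
    int info = starsh_blrm__dsdd_mpi(&M, F, maxrank, tol, onfly);
    if(info != STARSH_SUCCESS)
        return info;
    /* ... use M (e.g. for matrix-vector products), then release it ... */
    starsh_blrm_free(M);
    return STARSH_SUCCESS;
}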
convolution_3x3_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-8a-inch/8a-64-outch/4b kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t)2u * 32, 32); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to4.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00[4] = (__fp16)k4.row(q + i)[k]; g00[5] = (__fp16)k5.row(q + i)[k]; g00[6] = (__fp16)k6.row(q + i)[k]; g00[7] = (__fp16)k7.row(q + i)[k]; g00 += 8; } } } } for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = 
(__fp16)k3.row(q + i)[k]; g00 += 4; } } } } static void conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd64_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory",
"v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, 
v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; 
q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; r0 += 8; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, 
v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = 
vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; r0 += 8; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { conv3x3s1_winograd64_transform_output_pack4_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
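// A minimal sketch (not ncnn API; helper name hypothetical, requires
// ARMv8.2-A FP16 with arm_neon.h): in the paired-output-channel asm kernels
// above, each 8-lane fp16 accumulator holds one tile's result for two pack4
// output channel groups, lanes 0-3 for channel group p and lanes 4-7 for
// channel group p+1. The "ext vN.16b, vN.16b, vN.16b, #8" before the second
// st1 is just the register-level equivalent of vget_high_f16, exactly as the
// scalar tail loop does with vget_low_f16/vget_high_f16.
#include <arm_neon.h>

static inline void store_sum8_to_two_channel_groups(float16x8_t sum,
                                                    __fp16* out0, __fp16* out1)
{
    vst1_f16(out0, vget_low_f16(sum));  // lanes 0-3 -> output channel group p
    vst1_f16(out1, vget_high_f16(sum)); // lanes 4-7 -> output channel group p+1
}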
decorate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE % % D D E C O O R R A A T E % % D D EEE C O O RRRR AAAAA T EEE % % D D E C O O R R A A T E % % DDDD EEEEE CCCC OOO R R A A T EEEEE % % % % % % MagickCore Image Decoration Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/quantum.h" /* Define declarations. */ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B o r d e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BorderImage() surrounds the image with a border of the color defined by % the bordercolor member of the image structure. The width and height % of the border are defined by the corresponding members of the border_info % structure. % % The format of the BorderImage method is: % % Image *BorderImage(const Image *image,const RectangleInfo *border_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o border_info: Define the width and height of the border. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *BorderImage(const Image *image, const RectangleInfo *border_info,ExceptionInfo *exception) { Image *border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width=image->columns+(border_info->width << 1); frame_info.height=image->rows+(border_info->height << 1); frame_info.x=(long) border_info->width; frame_info.y=(long) border_info->height; frame_info.inner_bevel=0; frame_info.outer_bevel=0; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); clone_image->matte_color=image->border_color; border_image=FrameImage(clone_image,&frame_info,exception); clone_image=DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->matte_color=image->matte_color; return(border_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F r a m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FrameImage() adds a simulated three-dimensional border around the image. % The color of the border is defined by the matte_color member of image. % Members width and height of frame_info specify the border width of the % vertical and horizontal sides of the frame. Members inner and outer % indicate the width of the inner and outer shadows of the frame. % % The format of the FrameImage method is: % % Image *FrameImage(const Image *image,const FrameInfo *frame_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o frame_info: Define the width and height of the frame and its bevels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info, ExceptionInfo *exception) { #define FrameImageTag "Frame/Image" Image *frame_image; long progress, y; MagickBooleanType status; MagickPixelPacket accentuate, border, highlight, interior, matte, shadow, trough; register IndexPacket *frame_indexes; register long x; register PixelPacket *q; unsigned long bevel_width, height, width; ViewInfo *frame_view; /* Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(frame_info != (FrameInfo *) NULL); bevel_width=(unsigned long) (frame_info->outer_bevel+frame_info->inner_bevel); width=frame_info->width-frame_info->x-bevel_width; height=frame_info->height-frame_info->y-bevel_width; if ((width < image->columns) || (height < image->rows)) ThrowImageException(OptionError,"FrameIsLessThanImageSize"); /* Initialize framed image attributes. 
*/ frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue, exception); if (frame_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse) { InheritException(exception,&frame_image->exception); frame_image=DestroyImage(frame_image); return((Image *) NULL); } if (frame_image->matte_color.opacity != OpaqueOpacity) frame_image->matte=MagickTrue; frame_image->page=image->page; if ((image->page.width != 0) && (image->page.height != 0)) { frame_image->page.width+=frame_image->columns-image->columns; frame_image->page.height+=frame_image->rows-image->rows; } /* Initialize 3D effects color. */ GetMagickPixelPacket(frame_image,&interior); SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL, &interior); GetMagickPixelPacket(frame_image,&matte); matte.colorspace=RGBColorspace; SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL, &matte); GetMagickPixelPacket(frame_image,&border); border.colorspace=RGBColorspace; SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL, &border); GetMagickPixelPacket(frame_image,&accentuate); accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate))); accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate))); accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange- AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate))); accentuate.opacity=matte.opacity; GetMagickPixelPacket(frame_image,&highlight); highlight.red=(MagickRealType) (QuantumScale*((QuantumRange- HighlightModulate)*matte.red+(QuantumRange*HighlightModulate))); highlight.green=(MagickRealType) (QuantumScale*((QuantumRange- HighlightModulate)*matte.green+(QuantumRange*HighlightModulate))); highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange- HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate))); highlight.opacity=matte.opacity; GetMagickPixelPacket(frame_image,&shadow); shadow.red=QuantumScale*matte.red*ShadowModulate; shadow.green=QuantumScale*matte.green*ShadowModulate; shadow.blue=QuantumScale*matte.blue*ShadowModulate; shadow.opacity=matte.opacity; GetMagickPixelPacket(frame_image,&trough); trough.red=QuantumScale*matte.red*TroughModulate; trough.green=QuantumScale*matte.green*TroughModulate; trough.blue=QuantumScale*matte.blue*TroughModulate; trough.opacity=matte.opacity; if (image->colorspace == CMYKColorspace) { ConvertRGBToCMYK(&interior); ConvertRGBToCMYK(&matte); ConvertRGBToCMYK(&border); ConvertRGBToCMYK(&accentuate); ConvertRGBToCMYK(&highlight); ConvertRGBToCMYK(&shadow); ConvertRGBToCMYK(&trough); } status=MagickTrue; progress=0; frame_view=AcquireCacheView(frame_image); height=(unsigned long) (frame_info->outer_bevel+(frame_info->y-bevel_width)+ frame_info->inner_bevel); q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,height, exception); frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view); if (q != (PixelPacket *) NULL) { /* Draw top of ornamental border. 
*/ for (y=0; y < (long) frame_info->outer_bevel; y++) { for (x=0; x < (long) (frame_image->columns-y); x++) { if (x < y) SetPixelPacket(frame_image,&highlight,q,frame_indexes); else SetPixelPacket(frame_image,&accentuate,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (long) frame_image->columns; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } for (y=0; y < (long) (frame_info->y-bevel_width); y++) { for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_image->columns-2*frame_info->outer_bevel; for (x=0; x < (long) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } for (y=0; y < (long) frame_info->inner_bevel; y++) { for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) (frame_info->x-bevel_width); x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } width=image->columns+((unsigned long) frame_info->inner_bevel << 1)-y; for (x=0; x < (long) width; x++) { if (x < y) SetPixelPacket(frame_image,&shadow,q,frame_indexes); else SetPixelPacket(frame_image,&trough,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (long) (image->columns+2*frame_info->inner_bevel); x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (long) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } (void) SyncCacheViewAuthenticPixels(frame_view,exception); } /* Draw sides of ornamental border. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (long) image->rows; y++) { register IndexPacket *frame_indexes; register long x; register PixelPacket *q; /* Initialize scanline with matte color. */ if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y, frame_image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view); for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) (frame_info->x-bevel_width); x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } /* Set frame interior to interior color. 
*/ for (x=0; x < (long) image->columns; x++) { SetPixelPacket(frame_image,&interior,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) frame_info->inner_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (long) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical #endif proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } height=(unsigned long) (frame_info->inner_bevel+frame_info->height- frame_info->y-image->rows-bevel_width+frame_info->outer_bevel); q=QueueCacheViewAuthenticPixels(frame_view,0,(long) (frame_image->rows- height),frame_image->columns,height,exception); if (q != (PixelPacket *) NULL) { /* Draw bottom of ornamental border. */ frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view); for (y=frame_info->inner_bevel-1; y >= 0; y--) { for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) (frame_info->x-bevel_width); x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < y; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (long) (image->columns+2*frame_info->inner_bevel); x++) { if (x >= (long) (image->columns+2*frame_info->inner_bevel-y)) SetPixelPacket(frame_image,&highlight,q,frame_indexes); else SetPixelPacket(frame_image,&accentuate,q,frame_indexes); q++; frame_indexes++; } width=frame_info->width-frame_info->x-image->columns-bevel_width; for (x=0; x < (long) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } height=frame_info->height-frame_info->y-image->rows-bevel_width; for (y=0; y < (long) height; y++) { for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } width=frame_image->columns-2*frame_info->outer_bevel; for (x=0; x < (long) width; x++) { SetPixelPacket(frame_image,&matte,q,frame_indexes); q++; frame_indexes++; } for (x=0; x < (long) frame_info->outer_bevel; x++) { SetPixelPacket(frame_image,&shadow,q,frame_indexes); q++; frame_indexes++; } } for (y=frame_info->outer_bevel-1; y >= 0; y--) { for (x=0; x < y; x++) { SetPixelPacket(frame_image,&highlight,q,frame_indexes); q++; frame_indexes++; } for ( ; x < (long) frame_image->columns; x++) { if (x >= (long) (frame_image->columns-y)) SetPixelPacket(frame_image,&shadow,q,frame_indexes); else SetPixelPacket(frame_image,&trough,q,frame_indexes); q++; frame_indexes++; } } (void) SyncCacheViewAuthenticPixels(frame_view,exception); } frame_view=DestroyCacheView(frame_view); x=(long) (frame_info->outer_bevel+(frame_info->x-bevel_width)+ frame_info->inner_bevel); y=(long) (frame_info->outer_bevel+(frame_info->y-bevel_width)+ frame_info->inner_bevel); (void) 
CompositeImage(frame_image,image->compose,image,x,y); return(frame_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RaiseImage() creates a simulated three-dimensional button-like effect % by lightening and darkening the edges of the image. Members width and % height of raise_info define the width of the vertical and horizontal % edge of the effect. % % The format of the RaiseImage method is: % % MagickBooleanType RaiseImage(const Image *image, % const RectangleInfo *raise_info,const MagickBooleanType raise) % % A description of each parameter follows: % % o image: the image. % % o raise_info: Define the width and height of the raise area. % % o raise: A value other than zero creates a 3-D raise effect, % otherwise it has a lowered effect. % */ MagickExport MagickBooleanType RaiseImage(Image *image, const RectangleInfo *raise_info,const MagickBooleanType raise) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) ExceptionInfo *exception; long progress, y; MagickBooleanType status; Quantum foreground, background; ViewInfo *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(raise_info != (RectangleInfo *) NULL); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth", image->filename); foreground=(Quantum) QuantumRange; background=(Quantum) 0; if (raise == MagickFalse) { foreground=(Quantum) 0; background=(Quantum) QuantumRange; } if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); /* Raise image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (long) raise_info->height; y++) { register long x; register PixelPacket *q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < y; x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q++; } for ( ; x < (long) (image->columns-y); x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red* AccentuateFactor+(MagickRealType) foreground*(QuantumRange- AccentuateFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* AccentuateFactor+(MagickRealType) foreground*(QuantumRange- AccentuateFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* AccentuateFactor+(MagickRealType) foreground*(QuantumRange- AccentuateFactor))); q++; } for ( ; x < (long) image->columns; x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red*ShadowFactor+ (MagickRealType) background*(QuantumRange-ShadowFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=(long) raise_info->height; y < (long) (image->rows-raise_info->height); y++) { register long x; register PixelPacket *q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (long) raise_info->width; x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q++; } for ( ; x < (long) (image->columns-raise_info->width); x++) q++; for ( ; x < (long) image->columns; x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red*ShadowFactor+ (MagickRealType) background*(QuantumRange-ShadowFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* ShadowFactor+(MagickRealType) 
background*(QuantumRange-ShadowFactor))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=(long) (image->rows-raise_info->height); y < (long) image->rows; y++) { register long x; register PixelPacket *q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (long) (image->rows-y); x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* HighlightFactor+(MagickRealType) foreground*(QuantumRange- HighlightFactor))); q++; } for ( ; x < (long) (image->columns-(image->rows-y)); x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red*TroughFactor+ (MagickRealType) background*(QuantumRange-TroughFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* TroughFactor+(MagickRealType) background*(QuantumRange-TroughFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* TroughFactor+(MagickRealType) background*(QuantumRange-TroughFactor))); q++; } for ( ; x < (long) image->columns; x++) { q->red=RoundToQuantum(QuantumScale*((MagickRealType) q->red*ShadowFactor+ (MagickRealType) background*(QuantumRange-ShadowFactor))); q->green=RoundToQuantum(QuantumScale*((MagickRealType) q->green* ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor))); q->blue=RoundToQuantum(QuantumScale*((MagickRealType) q->blue* ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
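/*
  A minimal sketch (not part of the MagickCore API; helper name hypothetical)
  of the per-channel blend RaiseImage() applies inline above: each edge pixel
  is mixed with the foreground or background quantum, weighted by one of the
  *Factor constants, then rescaled back into [0,QuantumRange].
*/
static inline Quantum RaiseBlendChannel(const Quantum pixel,
  const Quantum reference,const Quantum factor)
{
  /* pixel*factor + reference*(QuantumRange-factor), rescaled by QuantumScale */
  return(RoundToQuantum(QuantumScale*((MagickRealType) pixel*factor+
    (MagickRealType) reference*(QuantumRange-factor))));
}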
app_baseline.c
/*
 * JGL@SAFARI
 */

/**
 * @file app_baseline.c
 * @brief Template for a Host Application Source File.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <stdint.h>
#include <sys/mman.h>
#include <omp.h>

#include "../../support/common.h"
#include "../../support/timer.h"

// Hwacha vector histogram kernel, implemented in assembly elsewhere in this
// benchmark; the prototype below is inferred from the call site in main().
extern void vec_vvhst_asm(unsigned int nr_elements, T* A,
                          unsigned int* histo, unsigned int bins);

// Pointer declaration
static T* A;
static unsigned int* histo_host;
static unsigned int* histo_hwacha;
static unsigned int* histo_host_4;

typedef struct Params {
    unsigned int input_size;
    unsigned int bins;
    int          n_warmup;
    int          n_reps;
    const char  *file_name;
    int          exp;
    int          n_threads;
} Params;

/**
 * @brief reads the input image into A
 * @param A destination array with p.input_size elements
 */
static void read_input(T* A, const Params p) {

    char  dctFileName[100];
    FILE *File = NULL;

    // Open input file
    unsigned short temp;
    snprintf(dctFileName, sizeof(dctFileName), "%s", p.file_name);
    if((File = fopen(dctFileName, "rb")) != NULL) {
        for(unsigned int y = 0; y < p.input_size; y++) {
            if(fread(&temp, sizeof(unsigned short), 1, File) != 1) {
                printf("%s is smaller than the requested input size\n", dctFileName);
                exit(1);
            }
            A[y] = (unsigned int)ByteSwap16(temp);
            if(A[y] >= 4096)
                A[y] = 4095;
        }
        fclose(File);
    } else {
        printf("%s does not exist\n", dctFileName);
        exit(1);
    }
}

/**
 * @brief compute output in the host
 */
static void histogram_host(unsigned int* histo, T* A, unsigned int bins,
    unsigned int nr_elements, int exp, unsigned int nr_of_dpus, int t) {

    omp_set_num_threads(t);

    if(!exp) {
        #pragma omp parallel for
        for (unsigned int i = 0; i < nr_of_dpus; i++) {
            for (unsigned int j = 0; j < nr_elements; j++) {
                T d = A[j];
                histo[i * bins + ((d * bins) >> DEPTH)] += 1;
            }
        }
    }
    else {
        #pragma omp parallel for
        for (unsigned int j = 0; j < nr_elements; j++) {
            T d = A[j];
            #pragma omp atomic update
            histo[(d * bins) >> DEPTH] += 1;
        }
    }
}

// Params ---------------------------------------------------------------------
void usage() {
    fprintf(stderr,
        "\nUsage:  ./program [options]"
        "\n"
        "\nGeneral options:"
        "\n    -h        help"
        "\n    -w <W>    # of untimed warmup iterations (default=1)"
        "\n    -e <E>    # of timed repetition iterations (default=3)"
        "\n    -t <T>    # of threads (default=1)"
        "\n    -x <X>    Weak (0) or strong (1) scaling (default=1)"
        "\n"
        "\nBenchmark-specific options:"
        "\n    -i <I>    input size (default=1536*1024 elements)"
        "\n    -b <B>    histogram size (default=256 bins)"
        "\n    -f <F>    input image file (default=./image_VanHateren.iml)"
        "\n");
}

struct Params input_params(int argc, char **argv) {
    struct Params p;
    p.input_size = 1536 * 1024;
    p.bins       = 256;
    p.n_warmup   = 1;
    p.n_reps     = 3;
    p.n_threads  = 1;
    p.exp        = 1;
    p.file_name  = "./image_VanHateren.iml";

    int opt;
    while((opt = getopt(argc, argv, "hi:b:w:e:f:x:t:")) >= 0) {
        switch(opt) {
        case 'h':
            usage();
            exit(0);
            break;
        case 'i': p.input_size = atoi(optarg); break;
        case 'b': p.bins       = atoi(optarg); break;
        case 'w': p.n_warmup   = atoi(optarg); break;
        case 'e': p.n_reps     = atoi(optarg); break;
        case 'f': p.file_name  = optarg;       break;
        case 'x': p.exp        = atoi(optarg); break;
        case 't': p.n_threads  = atoi(optarg); break;
        default:
            fprintf(stderr, "\nUnrecognized option!\n");
            usage();
            exit(0);
        }
    }
    assert(p.n_threads > 0 && "Invalid # of threads!");

    return p;
}

/**
 * @brief Main of the Host Application.
 */
int main(int argc, char **argv) {

    if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
        perror("mlockall failed");
        return 1;
    }

    struct Params p = input_params(argc, argv);

    // This baseline has no DPUs; keep a single histogram copy even for the
    // multi-copy (-x 0) layout, whose size scales with the number of copies.
    uint32_t nr_of_dpus = 1;

    const unsigned int input_size = p.input_size; // Size of input image
    assert(input_size % p.n_threads == 0 && "Input size!");

    // Input/output allocation
    A = malloc(input_size * sizeof(T));
    T *bufferA = A;
    (void) bufferA;
    if(!p.exp)
        histo_host = malloc(nr_of_dpus * p.bins * sizeof(unsigned int));
    else {
        histo_host   = malloc(p.bins * sizeof(unsigned int));
        histo_hwacha = malloc(p.bins * sizeof(unsigned int));
        histo_host_4 = malloc(p.bins * sizeof(unsigned int));
    }

    // Read the input image.
    read_input(A, p);

    if(!p.exp)
        memset(histo_host, 0, nr_of_dpus * p.bins * sizeof(unsigned int));
    else {
        memset(histo_host,   0, p.bins * sizeof(unsigned int));
        memset(histo_hwacha, 0, p.bins * sizeof(unsigned int));
        memset(histo_host_4, 0, p.bins * sizeof(unsigned int));
    }

    Timer timer;

    start(&timer, 0, 0);
    histogram_host(histo_host, A, p.bins, input_size, p.exp, nr_of_dpus, 1);
    stop(&timer, 0);

    start(&timer, 1, 0);
    vec_vvhst_asm(input_size, A, histo_hwacha, p.bins);
    stop(&timer, 1);

    start(&timer, 2, 0);
    histogram_host(histo_host_4, A, p.bins, input_size, p.exp, nr_of_dpus, 4);
    stop(&timer, 2);

    bool correct = true;
    for (unsigned int i = 0; i < p.bins; i++) {
        if (histo_host[i] != histo_hwacha[i]) {
            printf("Hwacha wrong %u, %u %u\n", i, histo_host[i], histo_hwacha[i]);
            correct = false;
        }
        if (histo_host[i] != histo_host_4[i]) {
            printf("Omp wrong %u, %u %u\n", i, histo_host[i], histo_host_4[i]);
        }
    }

    printf("******************************\n");
    if (correct)
        printf("Hwacha correct!\n");
    else
        printf("Hwacha wrong T_T\n");
    printf("******************************\n");

    printf("CPU ");
    print(&timer, 0, 1);
    printf("\n");
    printf("Hwacha ");
    print(&timer, 1, 1);
    printf("\n");
    printf("4 threads ");
    print(&timer, 2, 1);
    printf("\n");

    munlockall();
    return 0;
}
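/*
 * A hypothetical alternative (not part of this benchmark) to the
 * "#pragma omp atomic" path in histogram_host() above: each thread fills a
 * private histogram and the bins are merged once at the end, trading one
 * atomic per element for one atomic per bin per thread, at the cost of
 * bins*threads extra memory. T and DEPTH come from common.h as elsewhere in
 * this file.
 */
static void histogram_host_private(unsigned int* histo, const T* A,
                                   unsigned int bins, unsigned int nr_elements,
                                   int t)
{
    omp_set_num_threads(t);
    #pragma omp parallel
    {
        // Per-thread private bins, zero-initialized.
        unsigned int* local = (unsigned int*) calloc(bins, sizeof(unsigned int));
        #pragma omp for nowait
        for (unsigned int j = 0; j < nr_elements; j++)
            local[(A[j] * bins) >> DEPTH] += 1;
        // Merge this thread's private bins into the shared histogram.
        for (unsigned int b = 0; b < bins; b++) {
            #pragma omp atomic update
            histo[b] += local[b];
        }
        free(local);
    }
}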
excl.c
/*************************************************************************** *cr *cr (C) Copyright 2008-2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "atom.h" #include "cutoff.h" #define CELLEN 4.f #define INV_CELLEN (1.f/CELLEN) extern int remove_exclusions( Lattice *lattice, /* the lattice */ float cutoff, /* exclusion cutoff distance */ Atoms *atoms /* array of atoms */ ) { int nx = lattice->dim.nx; int ny = lattice->dim.ny; int nz = lattice->dim.nz; float xlo = lattice->dim.lo.x; float ylo = lattice->dim.lo.y; float zlo = lattice->dim.lo.z; float gridspacing = lattice->dim.h; Atom *atom = atoms->atoms; const float a2 = cutoff * cutoff; const float inv_gridspacing = 1.f / gridspacing; const int radius = (int) ceilf(cutoff * inv_gridspacing) - 1; /* lattice point radius about each atom */ int n; int i, j, k; int ia, ib, ic; int ja, jb, jc; int ka, kb, kc; int index; int koff, jkoff; float x, y, z, q; float dx, dy, dz; float dz2, dydz2, r2; float e; float xstart, ystart; float *pg; int gindex; int ncell, nxcell, nycell, nzcell; int *first, *next; float inv_cellen = INV_CELLEN; Vec3 minext, maxext; /* find min and max extent */ get_atom_extent(&minext, &maxext, atoms); /* number of cells in each dimension */ nxcell = (int) floorf((maxext.x-minext.x) * inv_cellen) + 1; nycell = (int) floorf((maxext.y-minext.y) * inv_cellen) + 1; nzcell = (int) floorf((maxext.z-minext.z) * inv_cellen) + 1; ncell = nxcell * nycell * nzcell; /* allocate for cursor link list implementation */ first = (int *) malloc(ncell * sizeof(int)); for (gindex = 0; gindex < ncell; gindex++) { first[gindex] = -1; } next = (int *) malloc(atoms->size * sizeof(int)); for (n = 0; n < atoms->size; n++) { next[n] = -1; } /* geometric hashing */ for (n = 0; n < atoms->size; n++) { if (0==atom[n].q) continue; /* skip any non-contributing atoms */ i = (int) floorf((atom[n].x - minext.x) * inv_cellen); j = (int) floorf((atom[n].y - minext.y) * inv_cellen); k = (int) floorf((atom[n].z - minext.z) * inv_cellen); gindex = (k*nycell + j)*nxcell + i; next[n] = first[gindex]; first[gindex] = n; } #pragma omp parallel for private(n, x, y, z, q, ic, jc, kc, ia, ib, ja, jb, \ ka, kb, xstart, ystart, dz, k, koff, dz2, \ dy, j, jkoff, dydz2, dx, index, pg, i, r2) /* traverse the grid cells */ for (gindex = 0; gindex < ncell; gindex++) { for (n = first[gindex]; n != -1; n = next[n]) { x = atom[n].x - xlo; y = atom[n].y - ylo; z = atom[n].z - zlo; q = atom[n].q; /* find closest grid point with position less than or equal to atom */ ic = (int) (x * inv_gridspacing); jc = (int) (y * inv_gridspacing); kc = (int) (z * inv_gridspacing); /* find extent of surrounding box of grid points */ ia = ic - radius; ib = ic + radius + 1; ja = jc - radius; jb = jc + radius + 1; ka = kc - radius; kb = kc + radius + 1; /* trim box edges so that they are within grid point lattice */ if (ia < 0) ia = 0; if (ib >= nx) ib = nx-1; if (ja < 0) ja = 0; if (jb >= ny) jb = ny-1; if (ka < 0) ka = 0; if (kb >= nz) kb = nz-1; /* loop over surrounding grid points */ xstart = ia*gridspacing - x; ystart = ja*gridspacing - y; dz = ka*gridspacing - z; for (k = ka; k <= kb; k++, dz += gridspacing) { koff = k*ny; dz2 = dz*dz; dy = ystart; for (j = ja; j <= jb; j++, dy += gridspacing) { jkoff = (koff + j)*nx; dydz2 = dy*dy + dz2; dx = xstart; index = jkoff + ia; pg = 
lattice->lattice + index; for (i = ia; i <= ib; i++, pg++, dx += gridspacing) { r2 = dx*dx + dydz2; /* If atom and lattice point are too close, set the lattice value * to zero */ //All threads are writing the same value //No need for an atomic update if (r2 < a2) *pg = 0; } } } /* end loop over surrounding grid points */ } /* end loop over atoms in a gridcell */ } /* end loop over gridcells */ /* free memory */ free(next); free(first); return 0; }
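/*
 * A small sketch (helper name hypothetical) of the geometric hashing used
 * twice above: atoms are binned into CELLEN-sized cells, and a cell's flat
 * index is row-major with x fastest and z slowest, matching
 * "gindex = (k*nycell + j)*nxcell + i".
 */
static inline int atom_cell_index(float x, float y, float z, Vec3 minext,
                                  int nxcell, int nycell)
{
    const int i = (int) floorf((x - minext.x) * INV_CELLEN);
    const int j = (int) floorf((y - minext.y) * INV_CELLEN);
    const int k = (int) floorf((z - minext.z) * INV_CELLEN);
    return (k * nycell + j) * nxcell + i;  /* x fastest, z slowest */
}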
GridInit.c
#include "XSbench_header.h" #ifdef MPI #include<mpi.h> #endif // Generates randomized energy grid for each nuclide // Note that this is done as part of initialization (serial), so rand() is used. void generate_grids(double * nuclide_grids, long n_isotopes, long n_gridpoints) { long i, j; for(i=0; i<n_isotopes; i++){ for(j=0; j<n_gridpoints; j++){ nuclide_grids[i*n_gridpoints*6 + j*6 + 0] = ((double)rand()/(double)RAND_MAX); //energy nuclide_grids[i*n_gridpoints*6 + j*6 + 1] = ((double)rand()/(double)RAND_MAX); //total xs nuclide_grids[i*n_gridpoints*6 + j*6 + 2] = ((double)rand()/(double)RAND_MAX); //elastic xs nuclide_grids[i*n_gridpoints*6 + j*6 + 3] = ((double)rand()/(double)RAND_MAX); //absorption xs nuclide_grids[i*n_gridpoints*6 + j*6 + 4] = ((double)rand()/(double)RAND_MAX); //fission xs nuclide_grids[i*n_gridpoints*6 + j*6 + 5] = ((double)rand()/(double)RAND_MAX); //nu fission xs } } } // Verification version of this function (tighter control over RNG) void generate_grids_v(double * nuclide_grids, long n_isotopes, long n_gridpoints) { long i, j; for(i=0; i<n_isotopes; i++){ for(j=0; j<n_gridpoints; j++){ nuclide_grids[i*n_gridpoints*6 + j*6 + 0] = rn_v(); //energy nuclide_grids[i*n_gridpoints*6 + j*6 + 1] = rn_v(); //total xs nuclide_grids[i*n_gridpoints*6 + j*6 + 2] = rn_v(); //elastic xs nuclide_grids[i*n_gridpoints*6 + j*6 + 3] = rn_v(); //absorption xs nuclide_grids[i*n_gridpoints*6 + j*6 + 4] = rn_v(); //fission xs nuclide_grids[i*n_gridpoints*6 + j*6 + 5] = rn_v(); //nu fission xs } } } // Sorts the nuclide grids by energy (lowest -> highest) void sort_nuclide_grids(double * nuclide_grids, long n_isotopes, long n_gridpoints) { long i, j; int (*cmp) (const void *, const void *); cmp = d_compare; for(i=0; i<n_isotopes; i++){ qsort(&nuclide_grids[i*n_gridpoints*6], n_gridpoints, sizeof(double)*6, cmp); } } // Allocates unionized energy grid, and assigns union of energy levels // from nuclide grids to it. double * generate_energy_grid(long n_isotopes, long n_gridpoints, double * nuclide_grids) { long i, j, n_unionized_grid_points; int mype = 0; double * energy_grid; int (*cmp) (const void *, const void *); cmp = d_compare; #ifdef MPI MPI_Comm_rank(MPI_COMM_WORLD, &mype); #endif if(mype == 0) printf("Generating Unionized Energy Grid...\n"); n_unionized_grid_points = n_isotopes*n_gridpoints; energy_grid = (double *)malloc(n_unionized_grid_points*sizeof(double)); if(mype == 0) printf("Copying and sorting nuclide grid energies...\n"); for(i=0; i<n_isotopes; i++){ for(j=0; j<n_gridpoints; j++){ energy_grid[i*n_gridpoints + j] = nuclide_grids[i*n_gridpoints*6 + j*6]; } } qsort(energy_grid, n_unionized_grid_points, sizeof(double), cmp); return energy_grid; } // Searches each nuclide grid for the closest energy level and assigns // pointer from unionized grid to the correct spot in the nuclide grid. 
// This process is time consuming, as the number of binary searches // required is: binary searches = n_gridpoints * n_isotopes^2 int * generate_grid_ptrs(long n_isotopes, long n_gridpoints, double *nuclide_grids, double * energy_grid) { long i, j; double quarry; int mype = 0; int * grid_ptrs; #ifdef MPI MPI_Comm_rank(MPI_COMM_WORLD, &mype); #endif grid_ptrs = (int *) malloc(n_isotopes*n_gridpoints*n_isotopes*sizeof(int)); if(mype == 0) printf("Assigning pointers to Unionized Energy Grid...\n"); #pragma omp parallel for default(none) \ shared( energy_grid, grid_ptrs, nuclide_grids, n_isotopes, n_gridpoints, mype ) \ private( quarry, i, j ) for(i=0; i<n_isotopes*n_gridpoints; i++){ quarry = energy_grid[i]; #if OMP == 1 if(INFO && mype == 0 && omp_get_thread_num() == 0 && i % 200 == 0) printf("\rAligning Unionized Grid...(%.0lf%% complete)", 100.0 * (double) i / (n_isotopes*n_gridpoints / omp_get_num_threads()) ); #else if(INFO && mype == 0 && i % 200 == 0) printf("\rAligning Unionized Grid...(%.0lf%% complete)", 100.0 * (double) i / (n_isotopes*n_gridpoints / 1) ); #endif for(j=0; j<n_isotopes; j++){ // j is the nuclide i.d. // log n binary search grid_ptrs[n_isotopes*i + j] = binary_search(&nuclide_grids[j*n_gridpoints*6], quarry, n_gridpoints); } } if(mype == 0) printf("\n"); return grid_ptrs; }
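/*
 * generate_grid_ptrs() above relies on XSBench's binary_search() over a
 * nuclide grid stored as 6 doubles per grid point, with the energy at offset
 * 0 of each record. A minimal sketch of that access pattern (a hypothetical
 * stand-in, not the actual XSBench routine): it returns the lower index of
 * the energy interval bracketing quarry.
 */
static int binary_search_sketch(const double * nuclide_grid, double quarry,
                                long n_gridpoints)
{
	long lo = 0;
	long hi = n_gridpoints - 1;
	while (hi - lo > 1) {
		long mid = lo + (hi - lo) / 2;
		if (nuclide_grid[mid*6] < quarry) /* energy lives at stride-6 offset 0 */
			lo = mid;
		else
			hi = mid;
	}
	return (int) lo;
}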
functions.h
// // Created by xaris on 22/01/2021. // #include <cstring> #ifndef HYKSORTINCPP_FUNCTIONS_H #define HYKSORTINCPP_FUNCTIONS_H #endif //HYKSORTINCPP_FUNCTIONS_H namespace omp_par { template <class T,class StrictWeakOrdering> void merge(T A_,T A_last,T B_,T B_last,T C_,int p,StrictWeakOrdering comp); template <class T,class StrictWeakOrdering> void merge_sort(T A,T A_last,StrictWeakOrdering comp); template <class T> void merge_sort(T A,T A_last); template <class T> void merge_sort_ptrs(T A,T A_last); template <class T, class I> T reduce(T* A, I cnt); template <class T, class I> void scan(T* A, T* B,I cnt); } template <class T,class StrictWeakOrdering> void omp_par::merge(T A_,T A_last,T B_,T B_last,T C_,int p,StrictWeakOrdering comp){ typedef typename std::iterator_traits<T>::difference_type _DiffType; typedef typename std::iterator_traits<T>::value_type _ValType; _DiffType N1=A_last-A_; _DiffType N2=B_last-B_; if(N1==0 && N2==0) return; if(N1==0 || N2==0){ _ValType* A=(N1==0? &B_[0]: &A_[0]); _DiffType N=(N1==0? N2 : N1 ); #pragma omp parallel for for(int i=0;i<p;i++){ _DiffType indx1=( i *N)/p; _DiffType indx2=((i+1)*N)/p; std::memcpy(&C_[indx1], &A[indx1], (indx2-indx1)*sizeof(_ValType)); } return; } //Split both arrays ( A and B ) into n equal parts. //Find the position of each split in the final merged array. int n=10; _ValType* split=new _ValType[p*n*2]; _DiffType* split_size=new _DiffType[p*n*2]; #pragma omp parallel for for(int i=0;i<p;i++){ for(int j=0;j<n;j++){ int indx=i*n+j; _DiffType indx1=(indx*N1)/(p*n); split [indx]=A_[indx1]; split_size[indx]=indx1+(std::lower_bound(B_,B_last,split[indx],comp)-B_); indx1=(indx*N2)/(p*n); indx+=p*n; split [indx]=B_[indx1]; split_size[indx]=indx1+(std::lower_bound(A_,A_last,split[indx],comp)-A_); } } //Find the closest split position for each thread that will //divide the final array equally between the threads. _DiffType* split_indx_A=new _DiffType[p+1]; _DiffType* split_indx_B=new _DiffType[p+1]; split_indx_A[0]=0; split_indx_B[0]=0; split_indx_A[p]=N1; split_indx_B[p]=N2; #pragma omp parallel for for(int i=1;i<p;i++){ _DiffType req_size=(i*(N1+N2))/p; int j=std::lower_bound(&split_size[0],&split_size[p*n],req_size,std::less<_DiffType>())-&split_size[0]; if(j>=p*n) j=p*n-1; _ValType split1 =split [j]; _DiffType split_size1=split_size[j]; j=(std::lower_bound(&split_size[p*n],&split_size[p*n*2],req_size,std::less<_DiffType>())-&split_size[p*n])+p*n; if(j>=2*p*n) j=2*p*n-1; if(abs(split_size[j]-req_size)<abs(split_size1-req_size)){ split1 =split [j]; split_size1=split_size[j]; } split_indx_A[i]=std::lower_bound(A_,A_last,split1,comp)-A_; split_indx_B[i]=std::lower_bound(B_,B_last,split1,comp)-B_; } delete[] split; delete[] split_size; //Merge for each thread independently. 
#pragma omp parallel for for(int i=0;i<p;i++){ T C=C_+split_indx_A[i]+split_indx_B[i]; //std::merge(A_+split_indx_A[i],A_+split_indx_A[i+1],B_+split_indx_B[i],B_+split_indx_B[i+1],C,comp); //sse<_ValType>::merge(A_+split_indx_A[i],A_+split_indx_A[i+1],B_+split_indx_B[i],B_+split_indx_B[i+1],C); std::merge(A_+split_indx_A[i],A_+split_indx_A[i+1],B_+split_indx_B[i],B_+split_indx_B[i+1],C); } delete[] split_indx_A; delete[] split_indx_B; } template <class T,class StrictWeakOrdering> void omp_par::merge_sort(T A,T A_last,StrictWeakOrdering comp){ typedef typename std::iterator_traits<T>::difference_type _DiffType; typedef typename std::iterator_traits<T>::value_type _ValType; int p=omp_get_max_threads(); _DiffType N=A_last-A; if(N<2*p || p==1){ std::sort(A,A_last,comp); return; } //Split the array A into p equal parts. _DiffType* split=new _DiffType[p+1]; split[p]=N; #pragma omp parallel for for(int id=0;id<p;id++){ split[id]=(id*N)/p; } //Sort each part independently. #pragma omp parallel for for(int id=0;id<p;id++){ std::sort(A+split[id],A+split[id+1],comp); } //Merge two parts at a time. _ValType* B=new _ValType[N]; _ValType* A_=&A[0]; _ValType* B_=&B[0]; for(int j=1;j<p;j=j*2){ for(int i=0;i<p;i=i+2*j){ if(i+j<p){ omp_par::merge(A_+split[i],A_+split[i+j],A_+split[i+j],A_+split[(i+2*j<=p?i+2*j:p)],B_+split[i],p,comp); }else{ #pragma omp parallel for for(int k=split[i];k<split[p];k++) B_[k]=A_[k]; } } _ValType* tmp_swap=A_; A_=B_; B_=tmp_swap; } //The final result should be in A. if(A_!=&A[0]){ #pragma omp parallel for for(int i=0;i<N;i++) A[i]=A_[i]; } //Free memory. delete[] split; delete[] B; } template <class T> void omp_par::merge_sort(T A,T A_last){ typedef typename std::iterator_traits<T>::value_type _ValType; if(sizeof(_ValType)<=8*sizeof(_ValType*)) omp_par::merge_sort(A,A_last,std::less<_ValType>()); else omp_par::merge_sort_ptrs(A,A_last); } template <class T> struct DataPtr{ public: T* elem; inline bool operator < ( DataPtr<T> const &other) const { return ((*elem)<(*(other.elem))); } }; template <class T> void omp_par::merge_sort_ptrs(T A,T A_last){ //std::cout<<"Using Pointer sort.\n"; int p=omp_get_max_threads(); typedef typename std::iterator_traits<T>::difference_type _DiffType; typedef typename std::iterator_traits<T>::value_type _ValType; _DiffType N=A_last-A; // Make copy and init pointer array to be sorted. DataPtr<_ValType>* B=new DataPtr<_ValType>[N]; _ValType* C=new _ValType[N]; #pragma omp parallel for for(int i=0;i<p;i++){ _DiffType start=(N*i)/p; _DiffType end=(N*(i+1))/p; std::memcpy(&C[start], &A[start], (end-start)*sizeof(_ValType)); for(_DiffType j=start;j<end;j++) B[j].elem=&C[0]+j; } // Sort pointers. omp_par::merge_sort(B,B+N,std::less<DataPtr<_ValType> >()); // Copy data to its sorted position. 
#pragma omp parallel for for(int i=0;i<p;i++){ _DiffType start=(N*i)/p; _DiffType end=(N*(i+1))/p; for(size_t j=start;j<end;j++) A[j]=*(B[j].elem); } delete[] B; delete[] C; } template <class T, class I> T omp_par::reduce(T* A, I cnt){ T sum=0; #pragma omp parallel for reduction(+:sum) for(I i = 0; i < cnt; i++) sum+=A[i]; return sum; } template <class T, class I> void omp_par::scan(T* A, T* B,I cnt){ int p=omp_get_max_threads(); if(cnt<100*p){ for(I i=1;i<cnt;i++) B[i]=B[i-1]+A[i-1]; return; } I step_size=cnt/p; #pragma omp parallel for for(int i=0; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; if(i!=0)B[start]=0; for(I j=start+1; j<end; j++) B[j]=B[j-1]+A[j-1]; } T* sum=new T[p]; sum[0]=0; for(int i=1;i<p;i++) sum[i]=sum[i-1]+B[i*step_size-1]+A[i*step_size-1]; #pragma omp parallel for for(int i=1; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; T sum_=sum[i]; for(I j=start; j<end; j++) B[j]+=sum_; } delete[] sum; } std::vector<unsigned int> init_array(int size,int task) { /* Intializes random number generator */ srand((unsigned) time(NULL)+task*100); std::vector<unsigned int> v(size); // contains size number of 0's std::generate (v.begin(), v.end(), []{ return rand() % 100; }); return v; } void print_array(std::vector<unsigned int> v, int size) { //This program prints the n values of an array int i; // printf("["); for(int i=0; i<size-1; i++){printf("%2d, ",v[i]);} printf("%2d]",v[size-1]); } void print_array_in_process(std::vector<unsigned int> v, int n, int p, int rank ) { printf("Array in process %d of %d with %d elements : ",rank,p,n); //This program prints the n values of an array int i; // printf("["); for(int i=0; i<n-1; i++){printf("%2d, ",v[i]);} printf("%2d]",v[n-1]); printf ( "\n\n"); } void scan(int *A, int* B, int cnt){ int p=omp_get_max_threads(); if(cnt<100*p){ for(int i=1;i<cnt;i++) B[i]=B[i-1]+A[i-1]; return; } int step_size=cnt/p; #pragma omp parallel for for(int i=0; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; if(i!=0)B[start]=0; for(int j=start+1; j<end; j++) B[j]=B[j-1]+A[j-1]; } int* sum=new int[p]; sum[0]=0; for(int i=1;i<p;i++) sum[i]=sum[i-1]+B[i*step_size-1]+A[i*step_size-1]; #pragma omp parallel for for(int i=1; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; int sum_=sum[i]; for(int j=start; j<end; j++) B[j]+=sum_; } delete[] sum; } void MergeLists( std::vector<unsigned int> &listA, std::vector<unsigned int> &listB) { unsigned int _low, _high; _low = ( (listA[0] > listB[0]) ? listA[0] : listB[0]); _high = ( (listA[listA.size()-1] < listB[listB.size()-1]) ? listA[listA.size()-1] : listB[listB.size()-1]); // We will do a full merge first ... size_t list_size = listA.size() + listB.size(); std::vector<unsigned int> scratch_list(list_size); unsigned int index1 = 0; unsigned int index2 = 0; for (size_t i = 0; i < list_size; i++) { //The order of (A || B) is important here, //so that index2 remains within bounds if ( (index1 < listA.size()) && ( (index2 >= listB.size()) || (listA[index1] <= listB[index2]) ) ) { scratch_list[i] = listA[index1]; index1++; } else { scratch_list[i] = listB[index2]; index2++; } } listA.clear(); listB.clear(); int ii=0; while ( ( (scratch_list[ii] < _low) || (ii < (list_size/2)) ) && (scratch_list[ii] <= _high) ) { ii++; } if(ii) { listA.insert(listA.end(), scratch_list.begin(), (scratch_list.begin() + ii)); } scratch_list.clear(); }
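// Hedged usage sketch for the omp_par helpers above; the demo function is
// hypothetical. Note that, as written, this header only pulls in <cstring>
// itself, so its includer must also provide <algorithm>, <iterator>,
// <vector>, <cstdio>, <cstdlib>, <ctime> and <omp.h>.
inline void omp_par_demo(int n)
{
    std::vector<unsigned int> v = init_array(n, /*task=*/0);
    omp_par::merge_sort(v.begin(), v.end());                 // parallel merge sort, in place
    std::vector<unsigned int> prefix(v.size(), 0);           // B[0] must start at 0
    omp_par::scan(v.data(), prefix.data(), (int) v.size());  // exclusive prefix sum
    unsigned int total = omp_par::reduce(v.data(), (int) v.size());
    printf("sum = %u\n", total);
}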
matrix.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "pix.h" void *vdup(void *v, int vlen) { void *r; if (v == NULL || vlen == 0) return NULL; if ((r = malloc(vlen)) == NULL) pstop("!!! vdup: not enough memory.\n"); memcpy(r, v, vlen); return r; } void vadd_s(int vlen, int op, short *v1, short *v2, short *r) { int i; if (op == 1) { //#pragma acc parallel loop create(r[:vlen]) copyin(v1[:vlen], v2[:vlen]) for (i=0; i < vlen; i++) { r[i] = (short)(v1[i] + v2[i]); } } else if (op == 0) { //#pragma omp parallel for private(i) for (i=0; i < vlen; i++) r[i] = (short)((v1[i] > v2[i]) ? (v1[i] - v2[i]) : 0); } else { //#pragma acc parallel loop create(r[:vlen]) copyin(v1[:vlen], v2[:vlen]) for (i=0; i < vlen; i++) { r[i] = (short)(v1[i] - v2[i]); } } } int vmax_s(int vlen, short *v, int *i_max) { int i, im; short max; im = 0; max = v[0]; for (i=1; i < vlen; i++) { if (max < v[i]) { im = i; max = v[i]; } } if (i_max != NULL) *i_max = im; return (int)max; } void vmaxmin_i(int vlen, int *v, int *vmax, int *i_max, int *vmin, int *i_min) { int i, im1, im2; int max, min; im1 = 0; im2 = 0; max = v[0]; min = v[0]; for (i=1; i < vlen; i++) { if (max < v[i]) { im1 = i; max = v[i]; } if (min > v[i]) { im2 = i; min = v[i]; } } if (i_max != NULL) *i_max = im1; if (i_min != NULL) *i_min = im2; *vmax = max; *vmin = min; } void mx_sub_s(int dim_x, int dim_y, int *sdim, short *mx, short *mr) { int x1, x2, y1, y2, sdim_x; int i, j, xx, yy; int mr_max, mx_max; x1 = sdim[0]; x2 = sdim[1]; y1 = sdim[2]; y2 = sdim[3]; sdim_x = x2-x1+1; mr_max = (x2-x1) + (y2-y1)*sdim_x + 1; mx_max = x2 + y2*dim_x; //#pragma acc parallel loop copyin(mr[:mr_max], mx[:mx_max]) copyout(mr[:mr_max]) collase(2) //#pragma omp parallel for private(i,j,xx,yy) for (j=y1; j <= y2; j++) { for (i=x1; i <= x2; i++) { yy = (i-x1) + (j-y1)*sdim_x; xx = i + j*dim_x; mr[yy] = mx[xx]; } } } // void mx_subT_s(int dim_x, int dim_y, int *sdim, short *mx, short *mr) // { // int x1, x2, y1, y2, sdim_y; // int i, j, xx, yy; // // x1 = sdim[0]; // x2 = sdim[1]; // y1 = sdim[2]; // y2 = sdim[3]; // sdim_y = y2-y1+1; // // #pragma omp parallel for private(i,j,xx,yy) // for (j=y1; j <= y2; j++) { // for (i=x1; i <= x2; i++) { // yy = (i-x1)*sdim_y + (j-y1); // if (i >= 0 && j >= 0 && i < dim_x && j < dim_y) { // xx = i + j*dim_x; // mr[yy] = mx[xx]; // } // else // mr[yy] = 0; // }} // } void mx_rsub_s(int dim_x, int dim_y, int *sdim, short *mx, short *mr) { int x1, x2, y1, y2; int i, j, xx; x1 = sdim[0]; x2 = sdim[1]; y1 = sdim[2]; y2 = sdim[3]; //#pragma omp parallel for private(i,j,xx) for (j=y1; j <= y2; j++) { for (i=x1; i <= x2; i++) { xx = i + j*dim_x; mr[xx] = mx[xx]; }} } int mx_inv(int N, double *A, int NRHS, double *B) { double *work; int *ipiv, lwork, NB=16, info; char *uplo="U"; lwork = N*NB; work = malloc(lwork*sizeof(double)); ipiv = malloc(N*sizeof(int)); if (work==NULL || ipiv==NULL) pstop("!!! mx_inv: not enough memory.\n"); dsysv_(uplo, &N, &NRHS, A, &N, ipiv, B, &N, work, &lwork, &info, 1); free(work); free(ipiv); return info; }
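/*
 * Hedged usage sketch for mx_inv() above (the demo function is hypothetical):
 * dsysv_ solves A*X = B for a symmetric A, so on success B holds the solution
 * and A is overwritten by the factorization. LAPACK expects column-major
 * storage; for a symmetric matrix the 2x2 layout below reads the same either
 * way. dsysv_ itself is assumed to be declared elsewhere (e.g. in pix.h or a
 * LAPACK header), since this file calls it without a visible prototype.
 */
static void mx_inv_demo(void)
{
    double A[4] = { 4.0, 1.0,
                    1.0, 3.0 };  /* symmetric 2x2 */
    double b[2] = { 1.0, 2.0 };  /* right-hand side, replaced by the solution */
    int info = mx_inv(2, A, 1, b);
    if (info != 0)
        pstop("!!! mx_inv_demo: dsysv_ failed.\n");
}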
convolution_sgemm_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_packn_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); // Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const __fp16* bias = _bias; // permute Mat tmp; if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator); else tmp.create(maxk, inch, size, 2u * packn, packn, opt.workspace_allocator); { int remain_size_start = 0; int nn_size = size >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { #if C906 #ifdef RVV_SPEC_0_7 asm volatile( "mv t3, %[LEN] \n\t" "mv t1, %[SRC] \n\t" "mv t2, %[TMP] \n\t" "slli t3, t3, 1 \n\t" "vle.v v0, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v1, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v2, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v3, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v4, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v5, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v6, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v7, (t1) \n\t" "add t1, t1, t3 \n\t" "vsseg8e.v v0, (t2) \n\t" : : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "t1", "t2", "t3"); img0 += size * packn; tmpptr += packn * 8; #else for (int l = 0; l < packn; l++) { tmpptr[0] = img0[l]; tmpptr[1] = img0[l + packn]; tmpptr[2] = img0[l + packn * 2]; tmpptr[3] = img0[l + packn * 3]; tmpptr[4] = img0[l + packn * 4]; tmpptr[5] = img0[l + packn * 5]; tmpptr[6] = img0[l + packn * 6]; tmpptr[7] = img0[l + packn * 7]; tmpptr += 8; } img0 += size * packn; #endif #else vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl); vfloat16m1_t _val4 = vle16_v_f16m1(img0 + packn * 4, vl); vfloat16m1_t _val5 = vle16_v_f16m1(img0 + packn * 5, vl); vfloat16m1_t _val6 = vle16_v_f16m1(img0 + packn * 6, vl); vfloat16m1_t _val7 = vle16_v_f16m1(img0 + packn * 7, vl); vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, 
_val2, _val3, _val4, _val5, _val6, _val7), vl); img0 += size * packn; tmpptr += packn * 8; #endif } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { #if C906 #ifdef RVV_SPEC_0_7 asm volatile( "mv t3, %[LEN] \n\t" "mv t1, %[SRC] \n\t" "mv t2, %[TMP] \n\t" "slli t3, t3, 1 \n\t" "vle.v v0, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v1, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v2, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v3, (t1) \n\t" "vsseg4e.v v0, (t2) \n\t" : : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr) : "cc", "memory", "v0", "v1", "v2", "v3", "t1", "t2", "t3"); img0 += size * packn; tmpptr += packn * 4; #else for (int l = 0; l < packn; l++) { tmpptr[0] = img0[l]; tmpptr[1] = img0[l + packn]; tmpptr[2] = img0[l + packn * 2]; tmpptr[3] = img0[l + packn * 3]; tmpptr += 4; } img0 += size * packn; #endif #else vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl); vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl); vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl); vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl); img0 += size * packn; tmpptr += packn * 4; #endif } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { #if C906 #ifdef RVV_SPEC_0_7 asm volatile( "mv t3, %[LEN] \n\t" "mv t1, %[SRC] \n\t" "mv t2, %[TMP] \n\t" "slli t3, t3, 1 \n\t" "vle.v v0, (t1) \n\t" "add t1, t1, t3 \n\t" "vle.v v1, (t1) \n\t" "add t1, t1, t3 \n\t" : : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr) : "cc", "memory", "v0", "v1", "t1", "t2", "t3"); img0 += size * packn; tmpptr += packn * 2; #else for (int l = 0; l < packn; l++) { tmpptr[0] = img0[l]; tmpptr[1] = img0[l + packn]; tmpptr += 2; } img0 += size * packn; #endif #else vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl); vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl); vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl); img0 += size * packn; tmpptr += packn * 2; #endif } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn; for (int k = 0; k < maxk; k++) { vfloat16m1_t _val = vle16_v_f16m1(img0, vl); vse16_v_f16m1(tmpptr, _val, vl); img0 += size * packn; tmpptr += packn; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); int i = 0; for (; i + 7 < size; i += 8) { const __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = 
vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl); if (bias) { _sum0 = vle16_v_f16m1(bias + p * packn, vl); _sum1 = vle16_v_f16m1(bias + p * packn, vl); _sum2 = vle16_v_f16m1(bias + p * packn, vl); _sum3 = vle16_v_f16m1(bias + p * packn, vl); _sum4 = vle16_v_f16m1(bias + p * packn, vl); _sum5 = vle16_v_f16m1(bias + p * packn, vl); _sum6 = vle16_v_f16m1(bias + p * packn, vl); _sum7 = vle16_v_f16m1(bias + p * packn, vl); } for (int j = 0; j < nn; j++) { __fp16 val0 = *tmpptr++; __fp16 val1 = *tmpptr++; __fp16 val2 = *tmpptr++; __fp16 val3 = *tmpptr++; __fp16 val4 = *tmpptr++; __fp16 val5 = *tmpptr++; __fp16 val6 = *tmpptr++; __fp16 val7 = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl); _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl); _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl); _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl); kptr0 += packn; } vse16_v_f16m1(outptr0, _sum0, vl); vse16_v_f16m1(outptr0 + packn, _sum1, vl); vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl); vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl); vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl); vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl); vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl); vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl); outptr0 += packn * 8; } for (; i + 3 < size; i += 4) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl); if (bias) { _sum0 = vle16_v_f16m1(bias + p * packn, vl); _sum1 = vle16_v_f16m1(bias + p * packn, vl); _sum2 = vle16_v_f16m1(bias + p * packn, vl); _sum3 = vle16_v_f16m1(bias + p * packn, vl); } for (int j = 0; j < nn; j++) { __fp16 val0 = *tmpptr++; __fp16 val1 = *tmpptr++; __fp16 val2 = *tmpptr++; __fp16 val3 = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl); _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl); kptr0 += packn; } vse16_v_f16m1(outptr0, _sum0, vl); vse16_v_f16m1(outptr0 + packn, _sum1, vl); vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl); vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl); outptr0 += packn * 4; } for (; i + 1 < size; i += 2) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl); vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl); if (bias) { _sum0 = vle16_v_f16m1(bias + p * packn, vl); _sum1 = vle16_v_f16m1(bias + p * packn, vl); } for (int j = 0; j < nn; j++) { __fp16 val0 = *tmpptr++; __fp16 val1 = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl); _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl); kptr0 += packn; } vse16_v_f16m1(outptr0, _sum0, vl); vse16_v_f16m1(outptr0 + packn, _sum1, vl); outptr0 += packn * 2; } for (; i 
< size; i++) { const __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk * packn; // inch always > 0 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); if (bias) { _sum = vle16_v_f16m1(bias + p * packn, vl); } for (int j = 0; j < nn; j++) { __fp16 val = *tmpptr++; vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl); kptr0 += packn; } vse16_v_f16m1(outptr0, _sum, vl); outptr0 += packn; } } } static void convolution_im2col_sgemm_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * packn; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); __fp16* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * packn; for (int i = 0; i < outh; i++) { int j = 0; for (; j < outw; j++) { vfloat16m1_t _val = vle16_v_f16m1(sptr, vl); vse16_v_f16m1(ptr, _val, vl); sptr += stride_w * packn; ptr += packn; } sptr += gap; } } } } } im2col_sgemm_packn_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt); }
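/*
 * Reference for the packed kernels above: im2col lays out, for each
 * kernel tap, the input values every output pixel reads, so the whole
 * convolution reduces to one matrix product with the kernel. A scalar,
 * single-channel C sketch (float instead of __fp16, no packn packing and
 * no 8/4/2/1 column tiling; names and sizes are illustrative):
 */
#include <stdio.h>

/* Each row of `col` is one kernel tap (u,v); each column is one output
 * position, so convolution becomes kernel-matrix x col. */
static void im2col_ref(const float *img, int w, int h,
                       int kw, int kh, int stride, float *col)
{
    int outw = (w - kw) / stride + 1;
    int outh = (h - kh) / stride + 1;
    for (int u = 0; u < kh; u++)
        for (int v = 0; v < kw; v++)
            for (int i = 0; i < outh; i++)
                for (int j = 0; j < outw; j++)
                    col[(u * kw + v) * (outw * outh) + i * outw + j] =
                        img[(i * stride + u) * w + (j * stride + v)];
}

int main(void)
{
    float img[9] = {1,2,3, 4,5,6, 7,8,9};   /* 3x3 image */
    float col[2 * 2 * 4];                    /* 2x2 kernel -> 4 taps x 4 outputs */
    im2col_ref(img, 3, 3, 2, 2, 1, col);
    for (int r = 0; r < 4; r++) {
        for (int c = 0; c < 4; c++) printf("%g ", col[r * 4 + c]);
        printf("\n");
    }
    return 0;
}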
filter_c99.c
/**
 * Copyright 2016-2017 Andreas Schäfer
 * Copyright 2017 Google
 *
 * Distributed under the Boost Software License, Version 1.0. (See accompanying
 * file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
 */

#ifdef __ICC
#include <omp.h>
#endif

#ifdef _MSC_BUILD
#pragma warning( push )
#pragma warning( disable : 4514 )
#endif

#include <math.h>

#ifdef _MSC_BUILD
#pragma warning( pop )
#endif

/**
 * Computes a 2D Gaussian filter with a 5x5 stencil across the YZ-plane.
 */
void filter_c99(double *data_new, const double *data_old, int dim_x, int dim_y, int dim_z)
{
    // cast types here to maintain a C++-compatible signature:
    double (* const restrict grid_old)[dim_y][dim_x] = (double (* const)[dim_y][dim_x])data_old;
    double (* restrict grid_new)[dim_y][dim_x] = (double (* )[dim_y][dim_x])data_new;

    double weights[5][5];
    double sum = 0;
    for (int y = 0; y < 5; ++y) {
        for (int x = 0; x < 5; ++x) {
            double x_component = x - 2;
            double y_component = y - 2;
            weights[y][x] = exp(-0.5 * (x_component * x_component +
                                        y_component * y_component)) / 2 / 3.14159265358979323846;
            sum += weights[y][x];
        }
    }

    // normalise the kernel so the weights sum to 1:
    for (int y = 0; y < 5; ++y) {
        for (int x = 0; x < 5; ++x) {
            weights[y][x] /= sum;
        }
    }

    // we exploit symmetry to avoid redundant loads of weights:
    double weight_00 = weights[2][2];
    double weight_01 = weights[2][1];
    double weight_02 = weights[2][0];
    double weight_11 = weights[1][1];
    double weight_12 = weights[1][0];
    double weight_22 = weights[0][0];

#pragma omp parallel for schedule(static)
    for (int z = 2; z < (dim_z - 2); ++z) {
        for (int y = 2; y < (dim_y - 2); ++y) {
#ifdef __ICC
#pragma vector always nontemporal
#endif
            for (int x = 0; x < dim_x; ++x) {
                grid_new[z][y][x] =
                    grid_old[z - 2][y - 2][x] * weight_22 +
                    grid_old[z - 2][y - 1][x] * weight_12 +
                    grid_old[z - 2][y + 0][x] * weight_02 +
                    grid_old[z - 2][y + 1][x] * weight_12 +
                    grid_old[z - 2][y + 2][x] * weight_22 +

                    grid_old[z - 1][y - 2][x] * weight_12 +
                    grid_old[z - 1][y - 1][x] * weight_11 +
                    grid_old[z - 1][y + 0][x] * weight_01 +
                    grid_old[z - 1][y + 1][x] * weight_11 +
                    grid_old[z - 1][y + 2][x] * weight_12 +

                    grid_old[z + 0][y - 2][x] * weight_02 +
                    grid_old[z + 0][y - 1][x] * weight_01 +
                    grid_old[z + 0][y + 0][x] * weight_00 +
                    grid_old[z + 0][y + 1][x] * weight_01 +
                    grid_old[z + 0][y + 2][x] * weight_02 +

                    grid_old[z + 1][y - 2][x] * weight_12 +
                    grid_old[z + 1][y - 1][x] * weight_11 +
                    grid_old[z + 1][y + 0][x] * weight_01 +
                    grid_old[z + 1][y + 1][x] * weight_11 +
                    grid_old[z + 1][y + 2][x] * weight_12 +

                    grid_old[z + 2][y - 2][x] * weight_22 +
                    grid_old[z + 2][y - 1][x] * weight_12 +
                    grid_old[z + 2][y + 0][x] * weight_02 +
                    grid_old[z + 2][y + 1][x] * weight_12 +
                    grid_old[z + 2][y + 2][x] * weight_22;
            }
        }
    }
}
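/*
 * Minimal driver, assuming it is compiled together with filter_c99.c
 * (the dimensions below are arbitrary): it feeds a unit impulse through
 * the filter. Since the stencil only mixes values along y and z, the
 * response at the impulse itself is the normalised centre weight
 * weight_00.
 */
#include <stdio.h>
#include <stdlib.h>

void filter_c99(double *data_new, const double *data_old,
                int dim_x, int dim_y, int dim_z);

int main(void)
{
    enum { X = 16, Y = 8, Z = 8 };
    size_t n = (size_t)X * Y * Z;
    double *old_grid = calloc(n, sizeof *old_grid);
    double *new_grid = calloc(n, sizeof *new_grid);
    if (!old_grid || !new_grid) return 1;

    old_grid[(4 * Y + 4) * X + 8] = 1.0;     /* unit impulse at z=4, y=4, x=8 */
    filter_c99(new_grid, old_grid, X, Y, Z);

    printf("centre response: %f\n", new_grid[(4 * Y + 4) * X + 8]);
    free(old_grid);
    free(new_grid);
    return 0;
}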
rumi-64-192-22r.c
/* * Date: 11 December 2015 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com */ /* * Simulation of boomerang analysis for Skinny * Date: March 21, 2020 * Author: Hosein Hadipour * Contact: hsn.hadipour@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <omp.h> #include <stdint.h> #include <stdbool.h> #include <string.h> // using namespace std; typedef unsigned long long int UINT64; // #define DEBUG 1 #define Nthreads 1 #define STEP ((1 << 10) - 1) // Table that encodes the parameters of the various Skinny versions: // (block size, key size, number of rounds) //Skinny-64-64: 32 rounds //Skinny-64-128: 36 rounds //Skinny-64-192: 40 rounds //Skinny-128-128: 40 rounds //Skinny-128-256: 48 rounds //Skinny-128-384: 56 rounds int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}}; // Packing of data is done as follows (state[i][j] stands for row i and column j): // 0 1 2 3 // 4 5 6 7 // 8 9 10 11 //12 13 14 15 // 4-bit Sbox const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15}; const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15}; // 8-bit Sbox const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff}; const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 
0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff}; // ShiftAndSwitchRows permutation const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12}; const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14}; // Tweakey permutation const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7}; const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1}; // round constants const unsigned char RC[62] = { 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20}; FILE *fic; void init_prng(int offset) { //int initial_seed = 0x5EC7F2B0; //int initial_seed = 0x30051991; My birthday! unsigned int initial_seed = 10*time(NULL) + 11*offset; srand(initial_seed); // Initialization, should only be called once. 
int r = rand(); printf("[+] PRNG initialized to 0x%08X\n", initial_seed); } void string_state(unsigned char state[16], int ver) { for (int i = 0; i < (versions[ver][0] >> 3); i++) { printf("%02x", state[i]); } } void string_tweak(unsigned char state[16], int ver) { for (int i = 0; i < (versions[ver][1] >> 3); i++) { printf("%02x", state[i]); } } void display_matrix(unsigned char state[4][4], int ver) { int i; unsigned char input[16]; if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); for (i = 0; i < 8; i++) fprintf(fic, "%02x", input[i]); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; for (i = 0; i < 16; i++) fprintf(fic, "%02x", input[i]); } } void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int k; fprintf(fic, "S = "); display_matrix(state, ver); for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { fprintf(fic, " - TK%i = ", k + 1); display_matrix(keyCells[k], ver); } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the TWEAKEY permutation pos = TWEAKEY_P[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { //application of LFSRs for TK updates if (k == 1) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } else if (k == 2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function} void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) 
{ //application of the inverse TWEAKEY permutation pos = TWEAKEY_P_inv[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 2; i <= 3; i++) { for (j = 0; j < 4; j++) { //application of inverse LFSRs for TK updates if (k == 1) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } else if (k == 2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } } // Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state void AddConstants(unsigned char state[4][4], int r) { state[0][0] ^= (RC[r] & 0xf); state[1][0] ^= ((RC[r] >> 4) & 0x3); state[2][0] ^= 0x2; } // apply the 4-bit Sbox void SubCell4(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4[state[i][j]]; } // apply the 4-bit inverse Sbox void SubCell4_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4_inv[state[i][j]]; } // apply the 8-bit Sbox void SubCell8(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8[state[i][j]]; } // apply the 8-bit inverse Sbox void SubCell8_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8_inv[state[i][j]]; } // Apply the ShiftRows function void ShiftRows(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the ShiftRows permutation pos = P[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the inverse ShiftRows function void ShiftRows_inv(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse ShiftRows permutation pos = P_inv[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the linear diffusion matrix //M = //1 0 1 1 //1 0 0 0 //0 1 1 0 //1 0 1 0 void MixColumn(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { state[1][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[3][j] ^= state[2][j]; temp = state[3][j]; state[3][j] = state[2][j]; 
state[2][j] = state[1][j]; state[1][j] = state[0][j]; state[0][j] = temp; } } // Apply the inverse linear diffusion matrix void MixColumn_inv(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { temp = state[3][j]; state[3][j] = state[0][j]; state[0][j] = state[1][j]; state[1][j] = state[2][j]; state[2][j] = temp; state[3][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[1][j] ^= state[2][j]; } } // decryption function of Skinny void dec(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char dummy[4][4] = {{0}}; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } for (i = r - 1; i >= 0; i--) { AddKey(dummy, keyCells, ver); } #ifdef DEBUG fprintf(fic, "DEC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = r - 1; i >= 0; i--) { MixColumn_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey_inv(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) SubCell4_inv(state); else SubCell8_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } #ifdef DEBUG fprintf(fic, "DEC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // encryption function of Skinny void enc(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if 
(versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } #ifdef DEBUG fprintf(fic, "ENC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = 0; i < r; i++) { if (versions[ver][0] == 64) SubCell4(state); else SubCell8(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after SubCell: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddConstants: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddKey: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif MixColumn(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after MixColumn: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } //The last subtweakey should not be added #ifdef DEBUG fprintf(fic, "ENC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // generate test vectors for all the versions of Skinny void TestVectors(int ver) { unsigned char p[16]; unsigned char c[16]; unsigned char k[48]; int n; for (n = 1; n < 10; n++) { int i; for (i = 0; i < (versions[ver][0] >> 3); i++) c[i] = p[i] = rand() & 0xff; for (i = 0; i < (versions[ver][0] >> 3); i++) printf("%02x", p[i]); printf("\n"); for (i = 0; i < (versions[ver][1] >> 3); i++) k[i] = rand() & 0xff; fprintf(fic, "TK = "); for (i = 0; i < (versions[ver][1] >> 3); i++) fprintf(fic, "%02x", k[i]); fprintf(fic, "\n"); fprintf(fic, "P = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", p[i]); fprintf(fic, "\n"); enc(c, k, ver, 10); fprintf(fic, "C = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n"); dec(c, k, ver, 10); fprintf(fic, "P' = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n\n"); } } int boomerang(int r, int ver, unsigned long long N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2) { int i; unsigned char p1[16], p2[16]; unsigned char p1_old[16], p2_old[16]; unsigned char c3_old[16], c4_old[16]; unsigned char c3[16], c4[16]; unsigned char k1[48], k2[48], k3[48], k4[48]; // randomly choose k1 for (i = 0; i < (versions[ver][1] 
>> 3); i++) k1[i] = rand() & 0xff; // derive k2 for (i = 0; i < (versions[ver][1] >> 3); i++) k2[i] = k1[i] ^ dk1[i]; // derive k3 for (i = 0; i < (versions[ver][1] >> 3); i++) k3[i] = k1[i] ^ dk2[i]; // derive k4 for (i = 0; i < (versions[ver][1] >> 3); i++) k4[i] = k2[i] ^ dk2[i]; int num = 0; for (UINT64 t = 0; t < N3; t++) { // randomly choose p1 for (i = 0; i < (versions[ver][0] >> 3); i++) { p1[i] = rand() & 0xff; p1_old[i] = p1[i]; } // derive p2 for (i = 0; i < (versions[ver][0] >> 3); i++) { p2[i] = p1[i] ^ dp[i]; p2_old[i] = p2[i]; } enc(p1, k1, ver, r); enc(p2, k2, ver, r); // derive c3 for (i = 0; i < (versions[ver][0] >> 3); i++) { c3[i] = p1[i] ^ dc[i]; c3_old[i] = c3[i]; } // derive c4 for (i = 0; i < (versions[ver][0] >> 3); i++) { c4[i] = p2[i] ^ dc[i]; c4_old[i] = c4[i]; } dec(c3, k3, ver, r); dec(c4, k4, ver, r); bool flag = 1; for (i = 0; i < (versions[ver][0] >> 3); i++) if ((c3[i] ^ c4[i]) != dp[i]) flag = 0; if (flag) { num++; printf("%s\n", "A right quartet found :)\n"); printf("p1: "); string_state(p1_old, ver); printf("\n"); printf("p2: "); string_state(p2_old, ver); printf("\n"); printf("p3: "); string_state(c3, ver); printf("\n"); printf("p4: "); string_state(c4, ver); printf("\n"); printf("c1: "); string_state(p1, ver); printf("\n"); printf("c2: "); string_state(p2, ver); printf("\n"); printf("c3: "); string_state(c3_old, ver); printf("\n"); printf("c4: "); string_state(c4_old, ver); printf("\n"); printf("k1: "); string_tweak(k1, ver); printf("\n"); printf("k2: "); string_tweak(k2, ver); printf("\n"); printf("k3: "); string_tweak(k3, ver); printf("\n"); printf("k4: "); string_tweak(k4, ver); printf("\n"); } } return num; } double send_boomerangs(int R, int ver, int N1, UINT64 N2, UINT64 N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2) { // Parallel execution int NUM[N1]; int counter; printf("#Rounds: %d rounds\n", R); printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %llu * %llu = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2)); printf("#Queries per thread = (#Bunches per thread) * (#Queries per bunch) = %llu * %llu = 2^(%f)\n", N2, N3, log(N2 * N3) / log(2)); clock_t clock_timer; double wall_timer; clock_timer = clock(); wall_timer = omp_get_wtime(); omp_set_num_threads(N1); #pragma omp parallel for for (counter = 0; counter < N1; counter++) { int num = 0; int ID = omp_get_thread_num(); //init_prng(ID); for (UINT64 j = 0; j < N2; j++) { num += boomerang(R, ver, N3, dp, dc, dk1, dk2); if ((j & STEP) == 0){ printf("PID: %d \t Bunch Number: %llu/%llu\n", ID, j, N2); } } NUM[ID] = num; } printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC); printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer); double sum = 0; double sum_temp = 1; for (int i = 0; i < N1; i++) sum += NUM[i]; printf("sum = %f\n", sum); sum_temp = (double)(N1 * N2 * N3) / sum; printf("2^(-%f)\n\n", log(sum_temp) / log(2)); printf("##########################\n"); return sum; } void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16]) { for (int i = 0; i < (versions[ver][0] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48]) { for (int i = 0; i < (versions[ver][1] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dt[i] = (unsigned char)(strtol(hex, 
NULL, 16) & 0xff); } } int main(int argc, char *argv[]) { //srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand(); int PROGRAMNUMBER; PROGRAMNUMBER = atoi(argv[1]); init_prng(PROGRAMNUMBER); // //test all versions of Skinny // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++) // { // sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]); // fic = fopen(name, "w"); // fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]); // TestVectors(i); // fclose(fic); // printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]); // } unsigned char dp[16]; unsigned char dc[16]; unsigned char dk1[48]; unsigned char dk2[48]; // ####################################################################################################### // ####################################################################################################### // ############################## User must change only the following lines ############################## int n = 1; // Number of independent experiments int R = 22; // Number of rounds int ver = 2; // Determine the version: // [0 = Skinny-64-64] // [1 = Skinny-64-128] // [2 = Skinny-64-192] // [3 = Skinny-128-128] // [4 = Skinny-128-256] // [5 = Skinny-128-384] char dp_str[] = "0000000000000200"; char dc_str[] = "5605060000450605"; char dk1_str[] = "0000000001000000000000000B0000000000000008000000"; char dk2_str[] = "000000000020000000000000003000000000000000D00000"; // Expected probability: 2^(-38.84) // ####################################################################################################### // ####################################################################################################### convert_hexstr_to_statearray(ver, dp_str, dp); convert_hexstr_to_statearray(ver, dc_str, dc); convert_hexstr_to_tweakarray(ver, dk1_str, dk1); convert_hexstr_to_tweakarray(ver, dk2_str, dk2); //########################## Number of queries ######################### int N1 = Nthreads; // Number of parallel threads : N1 int deg1 = 16; int deg2 = 17; UINT64 N2 = (UINT64)1 << deg1; // Number of bunches per thread: N2 = 2^(deg1) UINT64 N3 = (UINT64)1 << deg2; // Number of queries per bunch: N3 = 2^(deg2) //################### Number of total queries : N1*N2*N3 ############### double sum = 0; for (int i = 0; i < n; i++) { sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2); } char name[30]; sprintf(name, "result_%d.txt", PROGRAMNUMBER); fic = fopen(name, "w"); fprintf(fic, "Program number = %d", PROGRAMNUMBER); fprintf(fic, "\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2)); fclose(fic); printf("Program number = %d", PROGRAMNUMBER); printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2)); // sum = (double)(n * N1 * N2 * N3) / sum; // printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2)); return 0; } //g++ rumi-64-192-22r.c -o rumi-64-192-22r -fopenmp -O3 --std=c++11
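// Round-trip sanity check for the enc()/dec() pair above -- a sketch, not
// part of the original program: append it to this file and call it from
// main() after init_prng(). ver = 2 selects Skinny-64-192, matching the
// attack parameters above; r is the (possibly reduced) round count.
void roundtrip_check(int ver, int r)
{
    unsigned char p[16], c[16], k[48];
    int i, nbytes = versions[ver][0] >> 3;
    for (i = 0; i < nbytes; i++) c[i] = p[i] = rand() & 0xff;
    for (i = 0; i < (versions[ver][1] >> 3); i++) k[i] = rand() & 0xff;
    enc(c, k, ver, r);   // both enc() and dec() work in place on c
    dec(c, k, ver, r);
    for (i = 0; i < nbytes; i++)
        if (c[i] != p[i]) { printf("round-trip FAILED\n"); return; }
    printf("round-trip OK (%d rounds)\n", r);
}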
GB_binop__isge_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16) // A*D function (colscale): GB (_AxD__isge_uint16) // D*A function (rowscale): GB (_DxB__isge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16) // C=scalar+B GB (_bind1st__isge_uint16) // C=scalar+B' GB (_bind1st_tran__isge_uint16) // C=A+scalar GB (_bind2nd__isge_uint16) // C=A'+scalar GB (_bind2nd_tran__isge_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
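/*
 * The whole file above is specialised from the single operator definition
 * GB_BINOP: z = (x >= y) on uint16_t, i.e. ISGE yields 0 or 1 in the
 * operand type rather than a bool. A standalone C check mirroring the
 * inner loop of the bind2nd kernel (without the GraphBLAS plumbing):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t Ax[5] = {0, 3, 7, 7, 65535};
    uint16_t Cx[5];
    uint16_t y = 7;
    for (int p = 0; p < 5; p++)
        Cx[p] = (Ax[p] >= y);      /* as in GB(_bind2nd__isge_uint16) */
    for (int p = 0; p < 5; p++)
        printf("%u ", Cx[p]);      /* prints: 0 0 1 1 1 */
    printf("\n");
    return 0;
}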
soxr.c
/* SoX Resampler Library Copyright (c) 2007-18 robs@users.sourceforge.net * Licence for this file: LGPL v2.1 See LICENCE for details. */ #include <math.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "soxr.h" #include "data-io.h" #include "internal.h" #if AVUTIL_FOUND #include <libavutil/cpu.h> #endif #if WITH_DEV_TRACE #include <stdarg.h> #include <stdio.h> int _soxr_trace_level; void _soxr_trace(char const * fmt, ...) { va_list args; va_start(args, fmt); vfprintf(stderr, fmt, args); fputc('\n', stderr); va_end(args); } #endif char const * soxr_version(void) { return "libsoxr-" SOXR_THIS_VERSION_STR; } typedef void sample_t; /* float or double */ typedef void (* fn_t)(void); typedef fn_t control_block_t[10]; #define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0]) #define resampler_process (*(void (*)(void *, size_t))p->control_block[1]) #define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2]) #define resampler_flush (*(void (*)(void *))p->control_block[3]) #define resampler_close (*(void (*)(void *))p->control_block[4]) #define resampler_delay (*(double (*)(void *))p->control_block[5]) #define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6]) #define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7]) #define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8]) #define resampler_id (*(char const * (*)(void))p->control_block[9]) typedef void * resampler_t; /* For one channel. */ typedef void * resampler_shared_t; /* Between channels. */ typedef void (* deinterleave_t)(sample_t * * dest, soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch); typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest, sample_t const * const * src, size_t, unsigned, unsigned long *); struct soxr { unsigned num_channels; double io_ratio; soxr_error_t error; soxr_quality_spec_t q_spec; soxr_io_spec_t io_spec; soxr_runtime_spec_t runtime_spec; void * input_fn_state; soxr_input_fn_t input_fn; size_t max_ilen; resampler_shared_t shared; resampler_t * resamplers; control_block_t control_block; deinterleave_t deinterleave; interleave_t interleave; void * * channel_ptrs; size_t clips; unsigned long seed; int flushing; }; #if WITH_CR32 || WITH_CR32S || WITH_CR64 || WITH_CR64S #include "filter.h" #else #define lsx_to_3dB(x) ((x)/(x)) #endif soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags) { soxr_quality_spec_t spec, * p = &spec; unsigned q = recipe & 0xf; /* TODO: move to soxr-lsr.c: */ unsigned quality = q > SOXR_LSR2Q+2? SOXR_VHQ : q > SOXR_LSR2Q? SOXR_QQ : q; double rej; memset(p, 0, sizeof(*p)); if (quality > SOXR_PRECISIONQ) { p->e = "invalid quality type"; return spec; } flags |= quality < SOXR_LSR0Q ? RESET_ON_CLEAR : 0; p->phase_response = "\62\31\144"[(recipe & 0x30)>>4]; p->stopband_begin = 1; p->precision = quality == SOXR_QQ ? 0 : quality <= SOXR_16_BITQ ? 16 : quality <= SOXR_32_BITQ ? 4 + quality * 4 : quality <= SOXR_LSR2Q ? 55 - quality * 4 : /* TODO: move to soxr-lsr.c */ 0; rej = p->precision * linear_to_dB(2.); p->flags = flags; if (quality <= SOXR_32_BITQ || quality == SOXR_PRECISIONQ) { #define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */ p->passband_end = quality == 1? 
LOW_Q_BW0 : 1 - .05 / lsx_to_3dB(rej); if (quality <= 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } else { /* TODO: move to soxr-lsr.c */ static float const bw[] = {.931f, .832f, .663f}; p->passband_end = bw[quality - SOXR_LSR0Q]; if (quality == SOXR_LSR2Q) { p->flags &= ~SOXR_ROLLOFF_NONE; p->flags |= SOXR_ROLLOFF_LSR2Q | SOXR_PROMOTE_TO_LQ; } } if (recipe & SOXR_STEEP_FILTER) p->passband_end = 1 - .01 / lsx_to_3dB(rej); return spec; } char const * soxr_engine(soxr_t p) { return resampler_id(); } size_t * soxr_num_clips(soxr_t p) { return &p->clips; } soxr_error_t soxr_error(soxr_t p) { return p->error; } soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads) { soxr_runtime_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); p->log2_min_dft_size = 10; p->log2_large_dft_size = 17; p->coef_size_kbytes = 400; p->num_threads = num_threads; return spec; } soxr_io_spec_t soxr_io_spec( soxr_datatype_t itype, soxr_datatype_t otype) { soxr_io_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); if ((itype | otype) >= SOXR_SPLIT * 2) p->e = "invalid io datatype(s)"; else { p->itype = itype; p->otype = otype; p->scale = 1; } return spec; } #if (WITH_CR32S && WITH_CR32) || (WITH_CR64S && WITH_CR64) #if defined __GNUC__ && defined __x86_64__ #define CPUID(type, eax_, ebx_, ecx_, edx_) \ __asm__ __volatile__ ( \ "cpuid \n\t" \ : "=a" (eax_), "=b" (ebx_), "=c" (ecx_), "=d" (edx_) \ : "a" (type), "c" (0)); #elif defined __GNUC__ && defined __i386__ #define CPUID(type, eax_, ebx_, ecx_, edx_) \ __asm__ __volatile__ ( \ "mov %%ebx, %%edi \n\t" \ "cpuid \n\t" \ "xchg %%edi, %%ebx \n\t" \ : "=a" (eax_), "=D" (ebx_), "=c" (ecx_), "=d" (edx_) \ : "a" (type), "c" (0)); #elif defined _M_X64 && defined _MSC_VER && _MSC_VER > 1500 void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue); #pragma intrinsic(__cpuidex) #define CPUID(type, eax_, ebx_, ecx_, edx_) do { \ int regs[4]; \ __cpuidex(regs, type, 0); \ eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \ } while(0) #elif defined _M_X64 && defined _MSC_VER void __cpuidex(int CPUInfo[4], int info_type); #pragma intrinsic(__cpuidex) #define CPUID(type, eax_, ebx_, ecx_, edx_) do { \ int regs[4]; \ __cpuidex(regs, type); \ eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \ } while(0) #elif defined _M_IX86 && defined _MSC_VER #define CPUID(type, eax_, ebx_, ecx_, edx_) \ __asm pushad \ __asm mov eax, type \ __asm xor ecx, ecx \ __asm cpuid \ __asm mov eax_, eax \ __asm mov ebx_, ebx \ __asm mov ecx_, ecx \ __asm mov edx_, edx \ __asm popad #endif #endif #if WITH_CR32S && WITH_CR32 static bool cpu_has_simd32(void) { #if defined __x86_64__ || defined _M_X64 return true; #elif defined __i386__ || defined _M_IX86 enum {SSE = 1 << 25, SSE2 = 1 << 26}; unsigned eax_, ebx_, ecx_, edx_; CPUID(1, eax_, ebx_, ecx_, edx_); return (edx_ & (SSE|SSE2)) != 0; #elif defined AV_CPU_FLAG_NEON return !!(av_get_cpu_flags() & AV_CPU_FLAG_NEON); #else return false; #endif } static bool should_use_simd32(void) { char const * e; return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) : ((e = getenv("SOXR_USE_SIMD32")))? 
!!atoi(e) : cpu_has_simd32(); } #else #define should_use_simd32() true #endif #if WITH_CR64S && WITH_CR64 #if defined __GNUC__ #define XGETBV(type, eax_, edx_) \ __asm__ __volatile__ ( \ ".byte 0x0f, 0x01, 0xd0\n" \ : "=a"(eax_), "=d"(edx_) : "c" (type)); #elif defined _M_X64 && defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219 #include <immintrin.h> #define XGETBV(type, eax_, edx_) do { \ union {uint64_t x; uint32_t y[2];} a = {_xgetbv(0)}; \ eax_ = a.y[0], edx_ = a.y[1]; \ } while(0) #elif defined _M_IX86 && defined _MSC_VER #define XGETBV(type, eax_, edx_) \ __asm pushad \ __asm mov ecx, type \ __asm _emit 0x0f \ __asm _emit 0x01 \ __asm _emit 0xd0 \ __asm mov eax_, eax \ __asm mov edx_, edx \ __asm popad #else #define XGETBV(type, eax_, edx_) eax_ = edx_ = 0 #endif static bool cpu_has_simd64(void) { enum {OSXSAVE = 1 << 27, AVX = 1 << 28}; unsigned eax_, ebx_, ecx_, edx_; CPUID(1, eax_, ebx_, ecx_, edx_); if ((ecx_ & (OSXSAVE|AVX)) == (OSXSAVE|AVX)) { XGETBV(0, eax_, edx_); return (eax_ & 6) == 6; } return false; } static bool should_use_simd64(void) { char const * e; return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) : ((e = getenv("SOXR_USE_SIMD64")))? !!atoi(e) : cpu_has_simd64(); } #else #define should_use_simd64() true #endif extern control_block_t _soxr_rate32_cb, _soxr_rate32s_cb, _soxr_rate64_cb, _soxr_rate64s_cb, _soxr_vr32_cb; static void runtime_num(char const * env_name, int min, int max, unsigned * field) { char const * e = getenv(env_name); if (e) { int i = atoi(e); if (i >= min && i <= max) *field = (unsigned)i; } } static void runtime_flag(char const * env_name, unsigned n_bits, unsigned n_shift, unsigned long * flags) { char const * e = getenv(env_name); if (e) { int i = atoi(e); unsigned long mask = (1UL << n_bits) - 1; if (i >= 0 && i <= (int)mask) *flags &= ~(mask << n_shift), *flags |= ((unsigned long)i << n_shift); } } soxr_t soxr_create( double input_rate, double output_rate, unsigned num_channels, soxr_error_t * error0, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { double io_ratio = output_rate!=0? input_rate!=0? input_rate / output_rate : -1 : input_rate!=0? -1 : 0; static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768}; soxr_t p = 0; soxr_error_t error = 0; #if WITH_DEV_TRACE #define _(x) (char)(sizeof(x)>=10? 'a'+(char)(sizeof(x)-10):'0'+(char)sizeof(x)) char const * e = getenv("SOXR_TRACE"); _soxr_trace_level = e? atoi(e) : 0; { static char const arch[] = {_(char), _(short), _(int), _(long), _(long long) , ' ', _(float), _(double), _(long double) , ' ', _(int *), _(int (*)(int)) , ' ', HAVE_BIGENDIAN ? 'B' : 'L' #if defined _OPENMP , ' ', 'O', 'M', 'P' #endif , 0}; #undef _ lsx_debug("arch: %s", arch); } #endif if (q_spec && q_spec->e) error = q_spec->e; else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2) error = "invalid io datatype(s)"; if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed"; if (p) { control_block_t * control_block; p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0); if (q_spec) { /* Backwards compatibility with original API: */ if (p->q_spec.passband_end > 2) p->q_spec.passband_end /= 100; if (p->q_spec.stopband_begin > 2) p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100; } p->io_ratio = io_ratio; p->num_channels = num_channels; if (io_spec) p->io_spec = *io_spec; else p->io_spec.scale = 1; p->runtime_spec = runtime_spec? 
*runtime_spec : soxr_runtime_spec(1); runtime_num("SOXR_MIN_DFT_SIZE", 8, 15, &p->runtime_spec.log2_min_dft_size); runtime_num("SOXR_LARGE_DFT_SIZE", 8, 20, &p->runtime_spec.log2_large_dft_size); runtime_num("SOXR_COEFS_SIZE", 100, 800, &p->runtime_spec.coef_size_kbytes); runtime_num("SOXR_NUM_THREADS", 0, 64, &p->runtime_spec.num_threads); runtime_flag("SOXR_COEF_INTERP", 2, 0, &p->runtime_spec.flags); runtime_flag("SOXR_STRICT_BUF", 1, 2, &p->runtime_spec.flags); runtime_flag("SOXR_NOSMALLINTOPT", 1, 3, &p->runtime_spec.flags); p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] / datatype_full_scale[p->io_spec.itype & 3]; p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p; #if WITH_CR32 || WITH_CR32S || WITH_VR32 if (0 #if WITH_VR32 || ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR)) #endif #if WITH_CR32 || WITH_CR32S || !(WITH_CR64 || WITH_CR64S) || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION)) #endif ) { p->deinterleave = (deinterleave_t)_soxr_deinterleave_f; p->interleave = (interleave_t)_soxr_interleave_f; control_block = #if WITH_VR32 ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))? &_soxr_vr32_cb : #endif #if WITH_CR32S !WITH_CR32 || should_use_simd32()? &_soxr_rate32s_cb : #endif &_soxr_rate32_cb; } #if WITH_CR64 || WITH_CR64S else #endif #endif #if WITH_CR64 || WITH_CR64S { p->deinterleave = (deinterleave_t)_soxr_deinterleave; p->interleave = (interleave_t)_soxr_interleave; control_block = #if WITH_CR64S !WITH_CR64 || should_use_simd64()? &_soxr_rate64s_cb : #endif &_soxr_rate64_cb; } #endif memcpy(&p->control_block, control_block, sizeof(p->control_block)); if (p->num_channels && io_ratio!=0) error = soxr_set_io_ratio(p, io_ratio, 0); } if (error) soxr_delete(p), p = 0; if (error0) *error0 = error; return p; } soxr_error_t soxr_set_input_fn(soxr_t p, soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen) { p->input_fn_state = input_fn_state; p->input_fn = input_fn; p->max_ilen = max_ilen? max_ilen : (size_t)-1; return 0; } static void soxr_delete0(soxr_t p) { unsigned i; if (p->resamplers) for (i = 0; i < p->num_channels; ++i) { if (p->resamplers[i]) resampler_close(p->resamplers[i]); free(p->resamplers[i]); } free(p->resamplers); free(p->channel_ptrs); free(p->shared); memset(p, 0, sizeof(*p)); } double soxr_delay(soxr_t p) { return (p && !p->error && p->resamplers)? 
resampler_delay(p->resamplers[0]) : 0; } static soxr_error_t fatal_error(soxr_t p, soxr_error_t error) { soxr_delete0(p); return p->error = error; } static soxr_error_t initialise(soxr_t p) { unsigned i; size_t shared_size, channel_size; resampler_sizes(&shared_size, &channel_size); p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels); p->shared = calloc(shared_size, 1); p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels); if (!p->shared || !p->channel_ptrs || !p->resamplers) return fatal_error(p, "malloc failed"); for (i = 0; i < p->num_channels; ++i) { soxr_error_t error; if (!(p->resamplers[i] = calloc(channel_size, 1))) return fatal_error(p, "malloc failed"); error = resampler_create( p->resamplers[i], p->shared, p->io_ratio, &p->q_spec, &p->runtime_spec, p->io_spec.scale); if (error) return fatal_error(p, error); } return 0; } soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels) { if (!p) return "invalid soxr_t pointer"; if (num_channels == p->num_channels) return p->error; if (!num_channels) return "invalid # of channels"; if (p->resamplers) return "# of channels can't be changed"; p->num_channels = num_channels; return soxr_set_io_ratio(p, p->io_ratio, 0); } soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len) { unsigned i; soxr_error_t error; if (!p) return "invalid soxr_t pointer"; if ((error = p->error)) return error; if (!p->num_channels) return "must set # channels before O/I ratio"; if (io_ratio <= 0) return "I/O ratio out-of-range"; if (!p->channel_ptrs) { p->io_ratio = io_ratio; return initialise(p); } if (p->control_block[8]) { for (i = 0; !error && i < p->num_channels; ++i) resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len); return error; } return fabs(p->io_ratio - io_ratio) < 1e-15? 0 : "varying O/I ratio is not supported with this quality level"; } void soxr_delete(soxr_t p) { if (p) soxr_delete0(p), free(p); } soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */ { if (p) { struct soxr tmp = *p; soxr_delete0(p); memset(p, 0, sizeof(*p)); p->input_fn = tmp.input_fn; p->runtime_spec = tmp.runtime_spec; p->q_spec = tmp.q_spec; p->io_spec = tmp.io_spec; p->num_channels = tmp.num_channels; p->input_fn_state = tmp.input_fn_state; memcpy(p->control_block, tmp.control_block, sizeof(p->control_block)); p->deinterleave = tmp.deinterleave; p->interleave = tmp.interleave; return (p->q_spec.flags & RESET_ON_CLEAR)? 
soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0; } return "invalid soxr_t pointer"; } static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len) { sample_t * dest = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1); } static size_t soxr_input(soxr_t p, void const * in, size_t len) { bool separated = !!(p->io_spec.itype & SOXR_SPLIT); unsigned i; if (!p || p->error) return 0; if (!in && len) {p->error = "null input buffer pointer"; return 0;} if (!len) { p->flushing = true; return 0; } if (separated) for (i = 0; i < p->num_channels; ++i) soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len); else { for (i = 0; i < p->num_channels; ++i) p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)( (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels); } return len; } static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated) { sample_t const * src; if (p->flushing) resampler_flush(p->resamplers[i]); resampler_process(p->resamplers[i], len); src = resampler_output(p->resamplers[i], NULL, &len); if (separated) p->clips += (p->interleave)(p->io_spec.otype, &dest, &src, len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed); else p->channel_ptrs[i] = (void /* const */ *)src; return len; } static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len) { unsigned u; size_t done = 0; bool separated = !!(p->io_spec.otype & SOXR_SPLIT); #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done1; done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated); if (!i) done = done1; } else #endif for (u = 0; u < p->num_channels; ++u) done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated); if (!separated) p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs, done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed); return done; } size_t soxr_output(soxr_t p, void * out, size_t len0) { size_t odone, odone0 = 0, olen = len0, osize, idone; size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio)); void const * in = out; /* Set to !=0, so that caller may leave unset. 
*/ bool was_flushing; if (!p || p->error) return 0; if (!out && len0) {p->error = "null output buffer pointer"; return 0;} do { odone = soxr_output_no_callback(p, out, olen); odone0 += odone; if (odone0 == len0 || !p->input_fn || p->flushing) break; osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels; out = (char *)out + osize * odone; olen -= odone; idone = p->input_fn(p->input_fn_state, &in, ilen); was_flushing = p->flushing; if (!in) p->error = "input function reported failure"; else soxr_input(p, in, idone); } while (odone || idone || (!was_flushing && p->flushing)); return odone0; } static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen) { size_t result; #if 0 if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING) result = rate_i_for_o(p->resamplers[0], olen); else #endif result = (size_t)ceil((double)olen * p->io_ratio); return min(result, ilen); } #if 0 static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen) { size_t result = (size_t)ceil((double)ilen / p->io_ratio); return min(result, olen); } #endif soxr_error_t soxr_process(soxr_t p, void const * in , size_t ilen0, size_t * idone0, void * out, size_t olen , size_t * odone0) { size_t ilen, idone, odone = 0; unsigned u; bool flush_requested = false; if (!p) return "null pointer"; if (!in) flush_requested = true, ilen = ilen0 = 0; else { if ((ptrdiff_t)ilen0 < 0) flush_requested = true, ilen0 = ~ilen0; if (idone0 && (1 || flush_requested)) ilen = soxr_i_for_o(p, olen, ilen0); else ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/; } p->flushing |= ilen == ilen0 && flush_requested; if (!out && !in) idone = ilen; else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */ #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done; if (in) soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen); done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], olen, true); if (!i) odone = done; } else #endif for (u = 0; u < p->num_channels; ++u) { if (in) soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen); odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true); } idone = ilen; } else { idone = ilen? soxr_input (p, in , ilen) : 0; odone = soxr_output(p, out, olen); } if (idone0) *idone0 = idone; if (odone0) *odone0 = odone; return p->error; } soxr_error_t soxr_oneshot( double irate, double orate, unsigned num_channels, void const * in , size_t ilen, size_t * idone, void * out, size_t olen, size_t * odone, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { soxr_t resampler = NULL; soxr_error_t error = q_spec? q_spec->e : 0; if (!error) { soxr_quality_spec_t q_spec1; if (!q_spec) q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1; resampler = soxr_create(irate, orate, num_channels, &error, io_spec, q_spec, runtime_spec); } if (!error) { error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone); soxr_delete(resampler); } return error; } soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error) { if (!p) return "null pointer"; if (!p->error && p->error != error) return p->error; p->error = error; return 0; }
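A caller's-eye sketch of the one-shot API implemented above. Assumptions, stated explicitly: passing NULL for io_spec selects the calloc'd default datatype, which in libsoxr is 32-bit float interleaved, and NULL for q_spec makes soxr_oneshot() fall back to the SOXR_LQ recipe as shown in its body; the buffer sizes are illustrative only.

/* Minimal usage sketch for soxr_oneshot(); resamples one channel of silence
 * from 48 kHz to 44.1 kHz. Note that soxr_error_t values are plain strings
 * in this file, so they can be printed directly. */
#include <stdio.h>
#include "soxr.h"

int main(void)
{
  static float in[48000];            /* 1 second of input (all zeros) */
  static float out[44100 + 128];     /* expected output length plus slack */
  size_t idone = 0, odone = 0;

  soxr_error_t e = soxr_oneshot(48000, 44100, 1,        /* rates, channels */
      in, sizeof(in) / sizeof(*in), &idone,             /* input */
      out, sizeof(out) / sizeof(*out), &odone,          /* output */
      NULL, NULL, NULL);                 /* default io/quality/runtime specs */
  if (e) { fprintf(stderr, "soxr: %s\n", e); return 1; }
  printf("consumed %zu samples, produced %zu samples\n", idone, odone);
  return 0;
}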
depend-2.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

void bar (int a[10][10][10]);

void
foo (int a[10][10][10], int **b)
{
  int c[10][10][10];
  #pragma omp task depend(out: a[2:4][3:][:7], b[1:7][2:8])
    bar (a);
  int i = 1, j = 3, k = 2, l = 6;
  #pragma omp task depend(in: a[++i:++j][++k:][:++l])
    bar (a);
  #pragma omp task depend(out: a[7:2][:][:], c[5:2][:][:])
  {
    bar (c);
    bar (a);
  }
}
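The test above only has to compile; at run time, depend clauses order sibling tasks. A small runnable illustration (assumes any OpenMP 4.0+ compiler, built with -fopenmp) in which the depend(in) task is guaranteed to run after the depend(out) task:

/* Standalone sketch, not part of the test above: depend(out)/depend(in)
 * serializes the producer task before the consumer task. */
#include <stdio.h>

int main (void)
{
  int x = 0;
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task depend(out: x)
      x = 42;                        /* producer: runs first */
    #pragma omp task depend(in: x)
      printf ("x = %d\n", x);        /* consumer: always sees 42 */
  }
  return 0;
}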
perftest.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "libperf.h" #include "libperf_int.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <netdb.h> #include <getopt.h> #include <string.h> #include <sys/types.h> #include <locale.h> #if HAVE_MPI # include <mpi.h> #elif HAVE_RTE # include<rte.h> #endif #define MAX_BATCH_FILES 32 enum { TEST_FLAG_PRINT_RESULTS = UCS_BIT(0), TEST_FLAG_PRINT_TEST = UCS_BIT(1), TEST_FLAG_SET_AFFINITY = UCS_BIT(8), TEST_FLAG_NUMERIC_FMT = UCS_BIT(9), TEST_FLAG_PRINT_FINAL = UCS_BIT(10), TEST_FLAG_PRINT_CSV = UCS_BIT(11) }; typedef struct sock_rte_group { int is_server; int connfd; } sock_rte_group_t; typedef struct test_type { const char *name; ucx_perf_api_t api; ucx_perf_cmd_t command; ucx_perf_test_type_t test_type; const char *desc; } test_type_t; struct perftest_context { ucx_perf_params_t params; const char *server_addr; int port; #if HAVE_MPI int mpi; #endif unsigned cpu; unsigned flags; unsigned num_batch_files; char *batch_files[MAX_BATCH_FILES]; char *test_names[MAX_BATCH_FILES]; sock_rte_group_t sock_rte_group; }; #define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oqM:T:d:x:A:B" test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "active message latency"}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency"}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency"}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate"}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / message rate"}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / message rate"}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / message rate"}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate"}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate"}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate"}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG, "UCP tag match latency"}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP tag match bandwidth"}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "UCP put latency"}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP put bandwidth"}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP get latency / bandwidth / message rate"}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic add bandwidth / message rate"}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic fetch-and-add latency / bandwidth / message rate"}, {"ucp_swap", 
UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic swap latency / bandwidth / message rate"}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic compare-and-swap latency / bandwidth / message rate"}, {NULL} }; static int safe_send(int sock, void *data, size_t size) { size_t total = 0; int ret; while (total < size) { ret = send(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("send() failed: %m"); return -1; } total += ret; } return 0; } static int safe_recv(int sock, void *data, size_t size) { size_t total = 0; int ret; while (total < size) { ret = recv(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("recv() failed: %m"); return -1; } total += ret; } return 0; } static void print_progress(char **test_names, unsigned num_names, const ucx_perf_result_t *result, unsigned flags, int final) { static const char *fmt_csv = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n"; static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n"; static const char *fmt_plain = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n"; unsigned i; if (!(flags & TEST_FLAG_PRINT_RESULTS) || (!final && (flags & TEST_FLAG_PRINT_FINAL))) { return; } if (flags & TEST_FLAG_PRINT_CSV) { for (i = 0; i < num_names; ++i) { printf("%s,", test_names[i]); } } printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.typical * 1000000.0, result->latency.moment_average * 1000000.0, result->latency.total_average * 1000000.0, result->bandwidth.moment_average / (1024.0 * 1024.0), result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.moment_average, result->msgrate.total_average); fflush(stdout); } static void print_header(struct perftest_context *ctx) { const char *test_api_str; const char *test_data_str; test_type_t *test; unsigned i; if (ctx->flags & TEST_FLAG_PRINT_TEST) { for (test = tests; test->name; ++test) { if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) { break; } } if (test->name != NULL) { if (test->api == UCX_PERF_API_UCT) { test_api_str = "transport layer"; switch (ctx->params.uct.data_layout) { case UCT_PERF_DATA_LAYOUT_SHORT: test_data_str = "short"; break; case UCT_PERF_DATA_LAYOUT_BCOPY: test_data_str = "bcopy"; break; case UCT_PERF_DATA_LAYOUT_ZCOPY: test_data_str = "zcopy"; break; default: test_data_str = "(undefined)"; break; } } else if (test->api == UCX_PERF_API_UCP) { test_api_str = "protocol layer"; test_data_str = "(automatic)"; /* TODO contig/stride/stream */ } else { return; } printf("+------------------------------------------------------------------------------------------+\n"); printf("| API: %-60s |\n", test_api_str); printf("| Test: %-60s |\n", test->desc); printf("| Data layout: %-60s |\n", test_data_str); printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params)); } } if (ctx->flags & TEST_FLAG_PRINT_CSV) { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { for (i = 0; i < ctx->num_batch_files; ++i) { printf("%s,", basename(ctx->batch_files[i])); } printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n"); } } else { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("+--------------+-----------------------------+---------------------+-----------------------+\n"); printf("| | latency (usec) | bandwidth (MB/s) | message rate (msg/s) |\n"); 
printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); printf("| # iterations | typical | average | overall | average | overall | average | overall |\n"); printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); } else if (ctx->flags & TEST_FLAG_PRINT_TEST) { printf("+------------------------------------------------------------------------------------------+\n"); } } } static void print_test_name(struct perftest_context *ctx) { char buf[200]; unsigned i, pos; if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) { strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+"); pos = 1; for (i = 0; i < ctx->num_batch_files; ++i) { if (i != 0) { buf[pos++] = '/'; } memcpy(&buf[pos], ctx->test_names[i], ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1)); pos += strlen(ctx->test_names[i]); } if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("%s\n", buf); } } } static void usage(struct perftest_context *ctx, const char *program) { test_type_t *test; printf("Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); #if HAVE_MPI printf("This test can be also launched as an MPI application\n"); #elif HAVE_RTE printf("This test can be also launched as an libRTE application\n"); #endif printf(" Common options:\n"); printf("\n"); printf(" Test options:\n"); printf(" -t <test> Test to run.\n"); for (test = tests; test->name; ++test) { printf(" %11s : %s.\n", test->name, test->desc); } printf("\n"); printf(" -D <layout>[,<layout>] Data layout. Default is \"short\" in UCT," " \"contig\" in UCP and previous one " "in batch mode. Second parameter is for " "receive side in UCP only.\n"); printf(" short : Use short messages API (cannot used for get).\n"); printf(" bcopy : Use copy-out API (cannot used for atomics).\n"); printf(" zcopy : Use zero-copy API (cannot used for atomics).\n"); printf(" contig : Use continuous datatype in UCP tests.\n"); printf(" iov : Use IOV datatype in UCP tests.\n"); printf("\n"); printf(" -d <device> Device to use for testing.\n"); printf(" -x <tl> Transport to use for testing.\n"); printf(" -c <cpu> Set affinity to this CPU. (off)\n"); printf(" -n <iters> Number of iterations to run. (%ld)\n", ctx->params.max_iter); printf(" -s <size> List of buffer sizes separated by comma, which " "make up a single message. Default is (%zu). " "For example, \"-s 16,48,8192,8192,14\"\n", ctx->params.msg_size_list[0]); printf(" -H <size> AM Header size. (%zu)\n", ctx->params.am_hdr_size); printf(" -w <iters> Number of warm-up iterations. (%zu)\n", ctx->params.warmup_iter); printf(" -W <count> Flow control window size, for active messages. (%u)\n", ctx->params.uct.fc_window); printf(" -O <count> Maximal number of uncompleted outstanding sends. (%u)\n", ctx->params.max_outstanding); printf(" -i <count> Distance between starting address of consecutive " "IOV entries. The same as UCT uct_iov_t stride.\n"); printf(" -N Use numeric formatting - thousands separator.\n"); printf(" -f Print only final numbers.\n"); printf(" -v Print CSV-formatted output.\n"); printf(" -p <port> TCP port to use for data exchange. (%d)\n", ctx->port); printf(" -b <batchfile> Batch mode. Read and execute tests from a file.\n"); printf(" Every line of the file is a test to run. 
" "The first word is the\n"); printf(" test name, and the rest are command-line " "arguments for the test.\n"); printf(" -M <thread> Thread support level for progress engine (single).\n"); printf(" single : Only the master thread can access.\n"); printf(" serialized : One thread can access at a time.\n"); printf(" multi : Multiple threads can access.\n"); printf(" -T <threads> Number of threads in the test (1); " "also implies \"-M multi\".\n"); printf(" -A <mode> Async progress mode. (thread)\n"); printf(" thread : Use separate progress thread.\n"); printf(" signal : Use signal based timer.\n"); printf(" -B Register memory with NONBLOCK flag.\n"); #if HAVE_MPI printf(" -P <0|1> Disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h Show this help message.\n"); printf("\n"); printf(" Server options:\n"); printf(" -l Accept clients in an infinite loop\n"); printf("\n"); } static const char *__basename(const char *path) { const char *p = strrchr(path, '/'); return (p == NULL) ? path : p; } static ucs_status_t parse_ucp_datatype_params(const char *optarg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const char *contig_type = "contig"; const size_t contig_type_size = strlen("contig"); if (0 == strncmp(optarg, iov_type, iov_type_size)) { *datatype = UCP_PERF_DATATYPE_IOV; } else if (0 == strncmp(optarg, contig_type, contig_type_size)) { *datatype = UCP_PERF_DATATYPE_CONTIG; } else { return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_message_sizes_params(const char *optarg, ucx_perf_params_t *params) { char *optarg_ptr, *optarg_ptr2; size_t token_num, token_it; const char delim = ','; optarg_ptr = (char *)optarg; token_num = 0; /* count the number of given message sizes */ while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) { ++optarg_ptr; ++token_num; } ++token_num; free(params->msg_size_list); /* free previously allocated buffer */ params->msg_size_list = malloc(sizeof(*params->msg_size_list) * token_num); if (NULL == params->msg_size_list) { return UCS_ERR_NO_MEMORY; } optarg_ptr = (char *)optarg; errno = 0; for (token_it = 0; token_it < token_num; ++token_it) { params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10); if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static void init_test_params(ucx_perf_params_t *params) { params->api = UCX_PERF_API_LAST; params->command = UCX_PERF_CMD_LAST; params->test_type = UCX_PERF_TEST_TYPE_LAST; params->thread_mode = UCS_THREAD_MODE_SINGLE; params->thread_count = 1; params->async_mode = UCS_ASYNC_MODE_THREAD; params->wait_mode = UCX_PERF_WAIT_MODE_LAST; params->max_outstanding = 1; params->warmup_iter = 10000; params->am_hdr_size = 8; params->alignment = ucs_get_page_size(); params->max_iter = 1000000l; params->max_time = 0.0; params->report_interval = 1.0; params->flags = UCX_PERF_TEST_FLAG_VERBOSE; params->uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->msg_size_cnt = 1; params->iov_stride = 0; params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->ucp.recv_datatype = 
UCP_PERF_DATATYPE_CONTIG; strcpy(params->uct.dev_name, "<none>"); strcpy(params->uct.tl_name, "<none>"); params->msg_size_list = malloc(sizeof(*params->msg_size_list) * params->msg_size_cnt); params->msg_size_list[0] = 8; } static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg) { test_type_t *test; char *optarg2 = NULL; switch (opt) { case 'd': ucs_snprintf_zero(params->uct.dev_name, sizeof(params->uct.dev_name), "%s", optarg); return UCS_OK; case 'x': ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name), "%s", optarg); return UCS_OK; case 't': for (test = tests; test->name; ++test) { if (!strcmp(optarg, test->name)) { params->api = test->api; params->command = test->command; params->test_type = test->test_type; break; } } if (test->name == NULL) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (0 == strcmp(optarg, "short")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (0 == strcmp(optarg, "bcopy")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (0 == strcmp(optarg, "zcopy")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(optarg, &params->ucp.send_datatype)) { optarg2 = strchr(optarg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->ucp.recv_datatype)) { return -1; } } } else { ucs_error("Invalid option argument for -D"); return -1; } return UCS_OK; case 'i': params->iov_stride = atol(optarg); return UCS_OK; case 'n': params->max_iter = atol(optarg); return UCS_OK; case 's': return parse_message_sizes_params(optarg, params); case 'H': params->am_hdr_size = atol(optarg); return UCS_OK; case 'W': params->uct.fc_window = atoi(optarg); return UCS_OK; case 'O': params->max_outstanding = atoi(optarg); return UCS_OK; case 'w': params->warmup_iter = atol(optarg); return UCS_OK; case 'o': params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'M': if (0 == strcmp(optarg, "single")) { params->thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (0 == strcmp(optarg, "serialized")) { params->thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (0 == strcmp(optarg, "multi")) { params->thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->thread_count = atoi(optarg); params->thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; case 'A': if (0 == strcmp(optarg, "thread")) { params->async_mode = UCS_ASYNC_MODE_THREAD; return UCS_OK; } else if (0 == strcmp(optarg, "signal")) { params->async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t read_batch_file(FILE *batch_file, ucx_perf_params_t *params, char** test_name_p) { #define MAX_SIZE 256 #define MAX_ARG_SIZE 2048 ucs_status_t status; char buf[MAX_ARG_SIZE]; int argc; char *argv[MAX_SIZE + 1]; int c; char *p; do { if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) { return UCS_ERR_NO_ELEM; } argc = 0; p = strtok(buf, " \t\n\r"); while (p && (argc < MAX_SIZE)) { argv[argc++] = p; p = strtok(NULL, " \t\n\r"); } argv[argc] = NULL; } while ((argc == 0) || (argv[0][0] == '#')); optind = 1; while ((c 
= getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) { status = parse_test_params(params, c, optarg); if (status != UCS_OK) { ucs_error("Invalid argument in batch file: -%c, status(%d):\"%s\"", c, status, ucs_status_string(status)); return status; } } *test_name_p = strdup(argv[0]); return UCS_OK; } static ucs_status_t parse_opts(struct perftest_context *ctx, int argc, char **argv) { ucs_status_t status; int c; ucs_trace_func(""); init_test_params(&ctx->params); ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; #if HAVE_MPI ctx->mpi = !isatty(0); #endif optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = strdup(optarg); } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; ctx->cpu = atoi(optarg); break; case 'P': #if HAVE_MPI ctx->mpi = atoi(optarg); break; #endif case 'h': usage(ctx, __basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, __basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 0 : 1; } static void sock_rte_barrier(void *rte_group) { #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned sync; sync = magic; safe_send(group->connfd, &sync, sizeof(unsigned)); sync = 0; safe_recv(group->connfd, &sync, sizeof(unsigned)); ucs_assert(sync == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size)); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size)); ucs_assert_always(size <= max); safe_recv(group->connfd, buffer, size); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; 
goto err; } if (ctx->server_addr == NULL) { optval = 1; ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (ret < 0) { ucs_error("setsockopt(SO_REUSEADDR) failed: %m"); status = UCS_ERR_INVALID_PARAM; goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); safe_recv(connfd, &ctx->params, sizeof(ctx->params)); if (ctx->params.msg_size_cnt) { ctx->params.msg_size_list = malloc(sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt); if (NULL == ctx->params.msg_size_list) { status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } safe_recv(connfd, ctx->params.msg_size_list, sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt); } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params)); if (ctx->params.msg_size_cnt) { safe_send(sockfd, ctx->params.msg_size_list, sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.rte_group = &ctx->sock_rte_group; ctx->params.rte = &sock_rte; ctx->params.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if HAVE_MPI static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group) { #pragma omp master MPI_Barrier(MPI_COMM_WORLD); #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov 
with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final); } static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; #elif HAVE_RTE static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group) { #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = 
ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { ucs_trace_func(""); #if HAVE_MPI int size, rank; MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; #elif HAVE_RTE rte_group_t group; rte_init(NULL, NULL, &group); if (0 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.rte_group = group; ctx->params.rte = &ext_rte; ctx->params.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #if HAVE_MPI MPI_Finalize(); #elif HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { cpu_set_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { if (ctx->cpu >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpu, nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } CPU_SET(ctx->cpu, &cpuset); ret = sched_setaffinity(0, sizeof(cpuset), &cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = sched_getaffinity(0, sizeof(cpuset), &cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." 
" Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t run_test_recurs(struct perftest_context *ctx, ucx_perf_params_t *parent_params, unsigned depth) { ucx_perf_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(parent_params, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } params = *parent_params; while ((status = read_batch_file(batch_file, &params, &ctx->test_names[depth])) == UCS_OK) { status = run_test_recurs(ctx, &params, depth + 1); free(ctx->test_names[depth]); if ((NULL == parent_params->msg_size_list) && (NULL != params.msg_size_list)) { free(params.msg_size_list); params.msg_size_list = NULL; } } fclose(batch_file); return UCS_OK; } static ucs_status_t run_test(struct perftest_context *ctx) { ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int rte = 0; int ret; /* Parse command line */ if (parse_opts(&ctx, argc, argv) != UCS_OK) { ret = -127; goto out; } #ifdef __COVERITY__ /* coverity[dont_call] */ rte = rand(); /* Shut up deadcode error */ #endif #if HAVE_MPI /* Don't try MPI when running interactively */ if (ctx.mpi && (MPI_Init(&argc, &argv) == 0)) { rte = 1; } #elif HAVE_RTE rte = 1; #endif status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out; } /* Create RTE */ status = (rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out: if (ctx.params.msg_size_list) { free(ctx.params.msg_size_list); } return ret; }
GB_unop__identity_int8_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int8_uint64) // op(A') function: GB (_unop_tran__identity_int8_uint64) // C type: int8_t // A type: uint64_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = (int8_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int8_uint64) ( int8_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
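A standalone sketch (hypothetical, outside GraphBLAS) of the apply kernel this file generates: the uint64_t-to-int8_t cast narrows modulo 2^8, so out-of-range values wrap on the usual two's-complement targets (strictly, the conversion of an out-of-range value to a signed type is implementation-defined before C23).

/* Sketch of the apply kernel above, without the GraphBLAS scaffolding:
 * Cx [p] = (int8_t) Ax [p], parallelized across entries. */
#include <stdint.h>
#include <stdio.h>

static void apply_identity_int8_uint64 (int8_t *Cx, const uint64_t *Ax,
                                        int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        Cx [p] = (int8_t) aij ;   /* narrowing cast; e.g. 300 wraps to 44 */
    }
}

int main (void)
{
    uint64_t A [3] = { 1, 127, 300 } ;
    int8_t   C [3] ;
    apply_identity_int8_uint64 (C, A, 3, 2) ;
    printf ("%d %d %d\n", C [0], C [1], C [2]) ;   /* 1 127 44 */
    return 0 ;
}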
BoundingBox.inc.h
// This file includes functions dealing with bounding box in image processing. // // Code exploited by Feng Wang <feng.wff@gmail.com> // // This Source Code Form is subject to the terms of the BSD lisence. // // calcRect, modernPosit are copied from AFLW toolkit, which is available for non-commercial research purposes only. #pragma once #include <opencv2\opencv.hpp> using namespace cv; #undef assert #define assert(_Expression) if(!(_Expression)) printf("error: %s %d, %s\n", __FILE__, __LINE__, #_Expression) namespace FaceInception { struct FaceAndPoints { Mat image; std::vector<Point2d> points; }; std::vector<cv::Point3f> worldPts = { cv::Point3f(-0.361, 0.309, -0.521), cv::Point3f(0.235, 0.296, -0.46), cv::Point3f(-0.071, -0.023, -0.13), cv::Point3f(-0.296, -0.348, -0.472), cv::Point3f(0.156, -0.366, -0.417) }; inline double IoU(Rect2d rect1, Rect2d rect2) { double left = std::max<double>(rect1.x, rect2.x); double top = std::max<double>(rect1.y, rect2.y); double right = std::min<double>(rect1.x + rect1.width, rect2.x + rect2.width); double bottom = std::min<double>(rect1.y + rect1.height, rect2.y + rect2.height); double overlap = max<double>(right - left, 0) * max<double>(bottom - top, 0); return overlap / (rect1.width * rect1.height + rect2.width * rect2.height - overlap); } inline Rect2d BoundingBoxRegressionTarget(Rect2d data_rect, Rect2d ground_truth) { return Rect2d((ground_truth.x - data_rect.x) / data_rect.width, (ground_truth.y - data_rect.y) / data_rect.height, log(ground_truth.width / data_rect.width), log(ground_truth.height / data_rect.height)); } void modernPosit(cv::Mat &rot, cv::Point3f &trans, std::vector<cv::Point2d> imagePts, std::vector<cv::Point3f> worldPts, double focalLength, cv::Point2d center = cv::Point2d(0.0, 0.0), int maxIterations = 100) { int nbPoints = imagePts.size(); std::vector<cv::Point2d>::iterator imagePtsIt; std::vector<cv::Point2d> centeredImage; for (imagePtsIt = imagePts.begin(); imagePtsIt != imagePts.end(); ++imagePtsIt) centeredImage.push_back(*imagePtsIt - center); for (imagePtsIt = centeredImage.begin(); imagePtsIt != centeredImage.end(); ++imagePtsIt) { imagePtsIt->x /= focalLength; imagePtsIt->y /= focalLength; } std::vector<double> ui(nbPoints); std::vector<double> vi(nbPoints); std::vector<double> oldUi(nbPoints); std::vector<double> oldVi(nbPoints); std::vector<double> deltaUi(nbPoints); std::vector<double> deltaVi(nbPoints); /*double ui[nbPoints]; double vi[nbPoints]; double oldUi[nbPoints]; double oldVi[nbPoints]; double deltaUi[nbPoints]; double deltaVi[nbPoints];*/ for (int i = 0; i < nbPoints; ++i) { ui[i] = centeredImage[i].x; vi[i] = centeredImage[i].y; } cv::Mat homogeneousWorldPts(nbPoints, 4, CV_32F); for (int i = 0; i < nbPoints; ++i) { cv::Point3f worldPoint = worldPts[i]; homogeneousWorldPts.at<float>(i, 0) = worldPoint.x; homogeneousWorldPts.at<float>(i, 1) = worldPoint.y; homogeneousWorldPts.at<float>(i, 2) = worldPoint.z; homogeneousWorldPts.at<float>(i, 3) = 1; // homogeneous } cv::Mat objectMat; cv::invert(homogeneousWorldPts, objectMat, cv::DECOMP_SVD); /* std::cout << "objectmat: " << std::endl; for (int j = 0; j < 4; ++j) { for (int i = 0; i < nbPoints; ++i) std::cout << objectMat.at<float>(j,i) << " "; std::cout << std::endl; } */ bool converged = false; int iterationCount = 0; double Tx = 0.0; double Ty = 0.0; double Tz = 0.0; double r1T[4]; double r2T[4]; double r1N[4]; double r2N[4]; double r3[4]; while ((!converged) && ((maxIterations < 0) || (iterationCount < maxIterations))) { // r1T= objectMat * ui; % pose 
vectors // r2T = objectMat * vi; for (int j = 0; j < 4; ++j) { r1T[j] = 0; r2T[j] = 0; for (int i = 0; i < nbPoints; ++i) { r1T[j] += ui[i] * objectMat.at<float>(j, i); r2T[j] += vi[i] * objectMat.at<float>(j, i); } } // Tz1 = 1/sqrt(r1T(1)*r1T(1) + r1T(2)*r1T(2)+ r1T(3)*r1T(3)); % 1/Tz1 is norm of r1T // Tz2 = 1/sqrt(r2T(1)*r2T(1) + r2T(2)*r2T(2)+ r2T(3)*r2T(3)); % 1/Tz2 is norm of r2T double Tz1, Tz2; Tz1 = 1 / sqrt(r1T[0] * r1T[0] + r1T[1] * r1T[1] + r1T[2] * r1T[2]); Tz2 = 1 / sqrt(r2T[0] * r2T[0] + r2T[1] * r2T[1] + r2T[2] * r2T[2]); // Tz = sqrt(Tz1*Tz2); % geometric average instead of arithmetic average of classicPosit.m Tz = sqrt(Tz1*Tz2); for (int j = 0; j < 4; ++j) { r1N[j] = r1T[j] * Tz; r2N[j] = r2T[j] * Tz; } // DEBUG for (int j = 0; j < 3; ++j) { if ((r1N[j] > 1.0) || (r1N[j] < -1.0)) { //std::cout << "WARNING: r1N[" << j << "] == " << r1N[j] << std::endl; r1N[j] = std::max<double>(-1.0, std::min<double>(1.0, r1N[j])); } if ((r2N[j] > 1.0) || (r2N[j] < -1.0)) { //std::cout << "WARNING: r2N[" << j << "] == " << r2N[j] << std::endl; r2N[j] = std::max<double>(-1.0, std::min<double>(1.0, r2N[j])); } } // r1 = r1N(1:3); // r2 = r2N(1:3); // r3 = cross(r1,r2); // r3T= [r3; Tz]; r3[0] = r1N[1] * r2N[2] - r1N[2] * r2N[1]; r3[1] = r1N[2] * r2N[0] - r1N[0] * r2N[2]; r3[2] = r1N[0] * r2N[1] - r1N[1] * r2N[0]; r3[3] = Tz; Tx = r1N[3]; Ty = r2N[3]; // wi= homogeneousWorldPts * r3T /Tz; std::vector<double> wi(nbPoints); //double wi[nbPoints]; for (int i = 0; i < nbPoints; ++i) { wi[i] = 0; for (int j = 0; j < 4; ++j) wi[i] += homogeneousWorldPts.at<float>(i, j) * r3[j] / Tz; } // oldUi = ui; // oldVi = vi; // ui = wi .* centeredImage(:,1) // vi = wi .* centeredImage(:,2) // deltaUi = ui - oldUi; // deltaVi = vi - oldVi; for (int i = 0; i < nbPoints; ++i) { oldUi[i] = ui[i]; oldVi[i] = vi[i]; ui[i] = wi[i] * centeredImage[i].x; vi[i] = wi[i] * centeredImage[i].y; deltaUi[i] = ui[i] - oldUi[i]; deltaVi[i] = vi[i] - oldVi[i]; } // delta = focalLength * focalLength * (deltaUi' * deltaUi + deltaVi' * deltaVi) double delta = 0.0; for (int i = 0; i < nbPoints; ++i) delta += deltaUi[i] * deltaUi[i] + deltaVi[i] * deltaVi[i]; delta = delta*focalLength * focalLength; /* std::cout << "delta: " << delta << std::endl ; std::cout << "r1N: " << r1N[0] << " " << r1N[1] << " " << r1N[2] << " " << r1N[3] << std::endl; std::cout << "r2N: " << r2N[0] << " " << r2N[1] << " " << r2N[2] << " " << r2N[3] << std::endl; std::cout << "r1T: " << r1T[0] << " " << r1T[1] << " " << r1T[2] << " " << r1T[3] << std::endl; std::cout << "r2T: " << r2T[0] << " " << r2T[1] << " " << r2T[2] << " " << r2T[3] << std::endl; std::cout << "r3: " << r3[0] << " " << r3[1] << " " << r3[2] << " " << r3[3] << std::endl; */ // converged = (count>0 & delta < 1) converged = (iterationCount > 0) && (delta < 0.01); ++iterationCount; //std::cout << "delta " << delta << std::endl; } // trans = [Tx; Ty; Tz]; // rot = [r1'; r2'; r3']; //std::cout << "iter count " << iterationCount << std::endl; trans.x = Tx; trans.y = Ty; trans.z = Tz; rot.create(3, 3, CV_32F); for (int i = 0; i < 3; ++i) { rot.at<float>(0, i) = r1N[i]; rot.at<float>(1, i) = r2N[i]; rot.at<float>(2, i) = r3[i]; } } cv::Point2d project(cv::Point3f pt, double focalLength, cv::Point2d imgCenter) { cv::Point2d res; res.x = (pt.x / pt.z) * focalLength + imgCenter.x; res.y = (pt.y / pt.z) * focalLength + imgCenter.y; return res; } cv::Point3f transform(cv::Point3f pt, cv::Mat rot, cv::Point3f trans) { cv::Point3f res; res.x = rot.at<float>(0, 0)*pt.x + rot.at<float>(0, 1)*pt.y + 
rot.at<float>(0, 2)*pt.z + trans.x;
    res.y = rot.at<float>(1, 0)*pt.x + rot.at<float>(1, 1)*pt.y + rot.at<float>(1, 2)*pt.z + trans.y;
    res.z = rot.at<float>(2, 0)*pt.x + rot.at<float>(2, 1)*pt.y + rot.at<float>(2, 2)*pt.z + trans.z;
    return res;
  }

  bool calcCenterScaleAndUp(cv::Mat faceData, std::vector<cv::Point2d> imagePts, double normEyeDist,
                            cv::Point2d &center, double &scale, cv::Point2d &upv) {
    double focalLength = static_cast<double>(faceData.cols) * 1.5;
    cv::Point2d imgCenter = cv::Point2d(static_cast<float>(faceData.cols) / 2.0f, static_cast<float>(faceData.rows) / 2.0f);

    cv::Mat rot;
    cv::Point3f trans;
    modernPosit(rot, trans, imagePts, worldPts, focalLength, imgCenter);

    // project center of the model to the image
    //cv::Point3f modelCenter = _meanFace3DModel.center_between_eyes();
    cv::Point3f modelCenter = cv::Point3f(-0.056, 0.3, -0.530);
    cv::Point3f rotatedCenter = transform(modelCenter, rot, trans);
    center = project(rotatedCenter, focalLength, imgCenter);

    // calc x and y extents of the projection of a sphere
    // centered at the model center (between eyes) and
    // with a diameter of the left to right eye distance
    double modelCenterDist = sqrt(rotatedCenter.x*rotatedCenter.x + rotatedCenter.y*rotatedCenter.y + rotatedCenter.z*rotatedCenter.z);
    double cameraModel3dYAngle = atan(rotatedCenter.y / sqrt(rotatedCenter.z*rotatedCenter.z + rotatedCenter.x*rotatedCenter.x));
    double cameraModel3dXAngle = atan(rotatedCenter.x / sqrt(rotatedCenter.z*rotatedCenter.z + rotatedCenter.y*rotatedCenter.y));
    double sphereCenterBorderAngle = asin(0.63 / 2.0 / modelCenterDist);
    double sphereProjTop = tan(cameraModel3dYAngle - sphereCenterBorderAngle) * focalLength;
    double sphereProjBottom = tan(cameraModel3dYAngle + sphereCenterBorderAngle) * focalLength;
    double sphereProjLeft = tan(cameraModel3dXAngle - sphereCenterBorderAngle) * focalLength;
    double sphereProjRight = tan(cameraModel3dXAngle + sphereCenterBorderAngle) * focalLength;

    // std::abs keeps the double arguments from being truncated by the C abs(int)
    scale = std::max<double>(std::abs(sphereProjRight - sphereProjLeft), std::abs(sphereProjBottom - sphereProjTop)) / normEyeDist;

    // up vector
    //if (!calcUpvFromEyes(upv)) {
    //  // cout << "upv from pose" << endl;
    //  //cv::Point3f modelCenterUp = _meanFace3DModel.center_between_eyes();
    //  cv::Point3f modelCenterUp = modelCenter;
    //  modelCenterUp.y += 0.5;
    //  cv::Point3f rotatedCenterUp = transform(modelCenterUp, rot, trans);
    //  cv::Point2d centerUp = project(rotatedCenterUp, focalLength, imgCenter);
    //  upv = centerUp - center;
    //  double upvlen = sqrt(upv.x*upv.x + upv.y*upv.y);
    //  upv.x /= upvlen;
    //  upv.y /= upvlen;
    //}
    cv::Point2d lrV = imagePts[1] - imagePts[0];
    double vlen = sqrt(lrV.x*lrV.x + lrV.y*lrV.y);
    upv.x = lrV.y / vlen;
    upv.y = -lrV.x / vlen;

    return true;
  }

  cv::Rect2d calcRect(cv::Mat faceData, std::vector<cv::Point2d> imagePts) {
    //cout << "process face " << faceData->ID << endl;
    // normalized rect
    double faceRectWidth = 128;
    double faceRectHeight = 128;
    double normEyeDist = 50.0; // distance between eyes in normalized rectangle
    // cv::Point2d targetCenter(64.0f, 37.0f); // center within normalized rectangle
    cv::Point2d centerOffset(0.0f, 25.0f); // shift from CENTER_BETWEEN_EYES to rectangle center

    cv::Point2d center;
    cv::Point2d upv;
    double scale = 0.0;
    calcCenterScaleAndUp(faceData, imagePts, normEyeDist, center, scale, upv);
    /*
    int x = floor((center.x - targetCenter.x*scale) + 0.5);
    int y = floor((center.y - targetCenter.y*scale) + 0.5);
    */
    double w = faceRectWidth*scale;
    double h = faceRectHeight*scale;
    cv::Point2d rectCenter = center;
    rectCenter -=
cv::Point2d(upv.x*centerOffset.y*scale, upv.y*centerOffset.y*scale);
    rectCenter -= cv::Point2d(upv.y*centerOffset.x*scale, -upv.x*centerOffset.x*scale);
    double x = rectCenter.x - faceRectWidth*scale / 2;
    double y = rectCenter.y - faceRectHeight*scale / 2;
    return cv::Rect2d(x, y, w, h);
  }

  bool strict_weak_ordering(const std::pair<Rect2d, float> a, const std::pair<Rect2d, float> b) {
    return a.second < b.second;
  }

  // Greedy NMS: repeatedly take the highest-scoring remaining box and erase
  // every lower-scored box that overlaps it by more than nms_threshold.
  // (Rewritten with indices: the previous iterator-based loop erased elements
  // while iterating, which invalidates the iterators it kept using.)
  void NMS(std::vector<std::pair<Rect2d, float>>& rects, double nms_threshold) {
    std::sort(rects.begin(), rects.end(), strict_weak_ordering); // ascending by score
    for (int best = (int)rects.size() - 1; best > 0; best--) {
      for (int i = best - 1; i >= 0; i--) {
        if (IoU(rects[i].first, rects[best].first) > nms_threshold) {
          rects.erase(rects.begin() + i);
          best--; // an element below 'best' was erased, so its index shifts down
        }
      }
    }
  }

  enum IoU_TYPE {
    IoU_UNION,
    IoU_MIN,
    IoU_MAX
  };

  // min_confidence is unused here; the parameter is kept for API symmetry with soft_nms_max.
  std::vector<int> nms_max(std::vector<std::pair<Rect2d, float>>& rects, double overlap, double min_confidence, IoU_TYPE type = IoU_UNION) {
    const int n = rects.size();
    std::vector<double> areas(n);
    typedef std::multimap<double, int> ScoreMapper;
    ScoreMapper map;
    for (int i = 0; i < n; i++) {
      map.insert(ScoreMapper::value_type(rects[i].second, i));
      areas[i] = rects[i].first.width*rects[i].first.height;
    }
    int picked_n = 0;
    std::vector<int> picked(n);
    while (map.size() != 0) {
      int last = map.rbegin()->second; // get the index of the maximum score value
      //std::cout << map.rbegin()->first << " " << last << std::endl;
      picked[picked_n] = last;
      picked_n++;
      for (ScoreMapper::iterator it = map.begin(); it != map.end();) {
        int idx = it->second;
        if (idx == last) {
          ScoreMapper::iterator tmp = it;
          tmp++;
          map.erase(it);
          it = tmp;
          continue;
        }
        double x1 = std::max<double>(rects[idx].first.x, rects[last].first.x);
        double x2 = std::min<double>(rects[idx].first.x + rects[idx].first.width, rects[last].first.x + rects[last].first.width);
        double w = x2 - x1;
        if (w <= 0) { it++; continue; }
        double y1 = std::max<double>(rects[idx].first.y, rects[last].first.y);
        double y2 = std::min<double>(rects[idx].first.y + rects[idx].first.height, rects[last].first.y + rects[last].first.height);
        double h = y2 - y1;
        if (h <= 0) { it++; continue; }
        double ov;
        switch (type) {
        case IoU_MAX:
          ov = w*h / std::max(areas[idx], areas[last]);
          break;
        case IoU_MIN:
          ov = w*h / std::min(areas[idx], areas[last]);
          break;
        case IoU_UNION:
        default:
          ov = w*h / (areas[idx] + areas[last] - w*h);
          break;
        }
        if (ov > overlap) {
          ScoreMapper::iterator tmp = it;
          tmp++;
          map.erase(it);
          it = tmp;
        }
        else {
          it++;
        }
      }
    }
    picked.resize(picked_n);
    return picked;
  }

  enum WEIGHT_TYPE {
    WEIGHT_LINEAR,
    WEIGHT_GAUSSIAN,
    WEIGHT_ORIGINAL
  };

  std::vector<int> soft_nms_max(std::vector<std::pair<Rect2d, float>>& rects, double overlap, double min_confidence, IoU_TYPE iou_type = IoU_UNION, WEIGHT_TYPE weight_type = WEIGHT_LINEAR) {
    const int n = rects.size();
    std::vector<double> areas(n);
    typedef std::multimap<double, int> ScoreMapper;
    ScoreMapper map;
    for (int i = 0; i < n; i++) {
      map.insert(ScoreMapper::value_type(rects[i].second, i));
      areas[i] = rects[i].first.width*rects[i].first.height;
    }
    int picked_n = 0;
    std::vector<int> picked(n);
    while (map.size() != 0) {
      int last = map.rbegin()->second; // get the index of the maximum score value
      //std::cout << map.rbegin()->first << " " << last << std::endl;
      picked[picked_n] = last;
      picked_n++;
      for (ScoreMapper::iterator it = map.begin(); it != map.end();) {
        int idx = it->second;
        if (idx == last) {
          ScoreMapper::iterator tmp = it;
          tmp++;
          map.erase(it);
          it = tmp;
continue; } double x1 = std::max<double>(rects[idx].first.x, rects[last].first.x); double x2 = std::min<double>(rects[idx].first.x + rects[idx].first.width, rects[last].first.x + rects[last].first.width); double w = x2 - x1; if (w <= 0) { it++; continue; } double y1 = std::max<double>(rects[idx].first.y, rects[last].first.y); double y2 = std::min<double>(rects[idx].first.y + rects[idx].first.height, rects[last].first.y + rects[last].first.height); double h = y2 - y1; if (h <= 0) { it++; continue; } double ov; switch (iou_type) { case IoU_MAX: ov = w*h / max(areas[idx], areas[last]); break; case IoU_MIN: ov = w*h / min(areas[idx], areas[last]); break; case IoU_UNION: default: ov = w*h / (areas[idx] + areas[last] - w*h); break; } double weight = 1.0; switch (weight_type) { case WEIGHT_LINEAR: if (ov > overlap) { weight = 1.0 - ov; } else { weight = 1.0; } break; case WEIGHT_GAUSSIAN: weight = exp((-ov*ov) / 0.5); break; default: case WEIGHT_ORIGINAL: if (ov > overlap) { weight = 0.0; } else { weight = 1.0; } break; } rects[idx].second *= weight; if (rects[idx].second < min_confidence) { ScoreMapper::iterator tmp = it; tmp++; map.erase(it); it = tmp; } else { it++; } } } picked.resize(picked_n); return picked; } std::vector<int> nms_avg(std::vector<std::pair<Rect2d, float>>& rects, double overlap) { const int n = rects.size(); std::vector<double> areas(n); typedef std::multimap<double, int> ScoreMapper; ScoreMapper map; for (int i = 0; i < n; i++) { map.insert(ScoreMapper::value_type(rects[i].second, i)); areas[i] = rects[i].first.width*rects[i].first.height; } int picked_n = 0; std::vector<int> picked(n); while (map.size() != 0) { int last = map.rbegin()->second; // get the index of maximum score value picked[picked_n] = last; picked_n++; int overlap_count = 1; double mean_x = rects[last].first.x, mean_y = rects[last].first.y, mean_w = log(rects[last].first.width), mean_h = log(rects[last].first.height); //double mean_x = rects[last].first.x, mean_y = rects[last].first.y, mean_w = rects[last].first.width, mean_h = rects[last].first.height; for (ScoreMapper::iterator it = map.begin(); it != map.end();) { int idx = it->second; double x1 = std::max<double>(rects[idx].first.x, rects[last].first.x); double y1 = std::max<double>(rects[idx].first.y, rects[last].first.y); double x2 = std::min<double>(rects[idx].first.x + rects[idx].first.width, rects[last].first.x + rects[last].first.width); double y2 = std::min<double>(rects[idx].first.y + rects[idx].first.height, rects[last].first.y + rects[last].first.height); double w = std::max<double>(0., x2 - x1); double h = std::max<double>(0., y2 - y1); double ov = w*h / (areas[idx] + areas[last] - w*h); if (ov > overlap) { ScoreMapper::iterator tmp = it; tmp++; map.erase(it); it = tmp; if (rects[idx].second > rects[last].second * 0.9) { mean_x += rects[idx].first.x; mean_y += rects[idx].first.y; mean_w += log(rects[idx].first.width); mean_h += log(rects[idx].first.height); //mean_w += rects[idx].first.width; //mean_h += rects[idx].first.height; overlap_count++; } } else { it++; } } rects[last].first.x = mean_x / overlap_count; rects[last].first.y = mean_y / overlap_count; rects[last].first.width = exp(mean_w / overlap_count); rects[last].first.height = exp(mean_h / overlap_count); //rects[last].first.width = mean_w / overlap_count; //rects[last].first.height = mean_h / overlap_count; } picked.resize(picked_n); return picked; } inline bool checkRect(Rect2d rect, Size image_size) { if (rect.x < 0 || rect.y < 0 || rect.width <= 0 || rect.height <= 0 || 
(rect.x + rect.width) > (image_size.width - 1) || (rect.y + rect.height) > (image_size.height - 1)) return false; else return true; } inline bool fixRect(Rect2d& rect, Size image_size, bool only_center = false) { if (rect.width <= 0 || rect.height <= 0) return false; if (only_center) { Point2d center = Point2d(rect.x + rect.width / 2, rect.y + rect.height / 2); center.x = max<double>(center.x, 0.0); center.y = max<double>(center.y, 0.0); center.x = min<double>(center.x, image_size.width - 1); center.y = min<double>(center.y, image_size.height - 1); rect.x = center.x - rect.width / 2; rect.y = center.y - rect.height / 2; } else { rect.x = max<double>(rect.x, 0.0); rect.y = max<double>(rect.y, 0.0); rect.width = min<double>(rect.width, image_size.width - 1 - rect.x); rect.height = min<double>(rect.height, image_size.height - 1 - rect.y); } return true; } inline void make_rect_square(Rect2d& rect) { double max_len = max(rect.width, rect.height); rect.x += rect.width / 2 - max_len / 2; rect.y += rect.height / 2 - max_len / 2; rect.width = rect.height = max_len; } double IoU(Rect2d rect, RotatedRect ellip, Size image_size = Size(0,0)) { Rect2d baseRect,ellip_br = ellip.boundingRect(); baseRect.x = min(rect.x, ellip_br.x); baseRect.y = min(rect.y, ellip_br.y); baseRect.width = max(rect.x + rect.width - baseRect.x, ellip_br.x + ellip_br.width - baseRect.x); baseRect.height = max(rect.y + rect.height - baseRect.y, ellip_br.y + ellip_br.height - baseRect.y); baseRect.x -= 10; baseRect.y -= 10; baseRect.width += 20; baseRect.height += 20; if (image_size.width != 0) fixRect(baseRect,image_size); rect.x -= baseRect.x; rect.y -= baseRect.y; ellip.center.x -= baseRect.x; ellip.center.y -= baseRect.y; Mat rect_image = Mat::zeros(Size(baseRect.width, baseRect.height), CV_8UC1); Mat ellipse_image = Mat::zeros(Size(baseRect.width, baseRect.height), CV_8UC1); rectangle(rect_image, rect, Scalar(255), -1); ellipse(ellipse_image, ellip, Scalar(255), -1); Mat Overlap = rect_image.mul(ellipse_image); //imshow("rect", rect_image); //imshow("ellipse", ellipse_image); //imshow("overlap", Overlap); //waitKey(0); double rect_size = countNonZero(rect_image);//rect.width * rect.height; double ellipse_size = countNonZero(ellipse_image); double overlap = countNonZero(Overlap); return overlap / (rect_size + ellipse_size - overlap); } /** @brief Applies an affine transformation to an image. The function warpAffine transforms the source image using the specified matrix: \f[\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})\f] when the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with cv::invertAffineTransform and then put in the formula above instead of M. The function cannot operate in-place. @param src input image. @param dst output image that has the size dsize and the same type as src . @param M \f$2\times 3\f$ transformation matrix. @param dsize size of the output image. @param flags combination of interpolation methods (see cv::InterpolationFlags) and the optional flag WARP_INVERSE_MAP that means that M is the inverse transformation ( \f$\texttt{dst}\rightarrow\texttt{src}\f$ ). @param borderMode pixel extrapolation method (see cv::BorderTypes); when borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to the "outliers" in the source image are not modified by the function. 
@param borderValue value used in case of a constant border; by default, it is 0. @sa warpPerspective, resize, remap, getRectSubPix, transform */ Mat cropImage(Mat& input_image, Rect2d roi, Size2d target_size, int flags = 1, int borderMode = 0, Scalar& borderValue = Scalar(0)) { //Point2f srcPoints[4] = { Point2f(roi.x, roi.y), Point2f(roi.x + roi.width,roi.y), Point2f(roi.x,roi.y + roi.height),Point2f(roi.x + roi.width,roi.y + roi.height) }; //Point2f dstPoints[4] = { Point2f(0,0), Point2f(target_size.width,0),Point2f(0, target_size.height), Point2f(target_size.width, target_size.height) }; //Mat M2 = getAffineTransform(srcPoints, dstPoints); //std::cout << M2 << std::endl; Mat M = (Mat_<float>(2, 3) << target_size.width / roi.width, 0, -roi.x*target_size.width / roi.width, 0, target_size.height / roi.height, -roi.y*target_size.height / roi.height); //std::cout << M << std::endl; Mat result; warpAffine(input_image, result, M, target_size, flags, borderMode, borderValue); return result; } //Only support 2 scaling Mat getPyramidStitchingImage2(Mat& input_image, std::vector<std::pair<Rect, double>>& location_and_scale, double scaling = 0.707, Scalar background_color = Scalar(0,0,0), int min_side = 12, int interval = 2) { using namespace std; bool stitch_x = input_image.cols < input_image.rows; Size current_size = input_image.size(); Point left_top = Point(0, 0); int width, height; if (stitch_x) { width = ceil(input_image.cols * (1 + scaling)) + interval * 2; height = 0; map<int, pair<int, int>> height_index; // (width_start, width, height) height_index[0] = pair<int, int>(width, 0); do { int min_h = INT_MAX, min_start = 0; for (auto h : height_index) { if (h.second.first > current_size.width + interval && h.second.second < min_h) { min_h = h.second.second; min_start = h.first; } } location_and_scale.push_back(make_pair(Rect(min_start, height_index[min_start].second, current_size.width, current_size.height), (double)current_size.height / (double)input_image.rows)); height_index[min_start + current_size.width + interval] = height_index[min_start]; height_index[min_start + current_size.width + interval].first -= current_size.width + interval; height_index[min_start].first = current_size.width; height_index[min_start].second += current_size.height + interval; if (height_index[min_start].second > height) height = height_index[min_start].second; //for (auto h : height_index) { // cout << h.first << " " << h.second.first << " " << h.second.second << endl; //} //cout << "===================="<<endl; current_size.width *= scaling; current_size.height *= scaling; } while (current_size.width > min_side); height += interval; } else { height = ceil(input_image.rows * (1 + scaling)) + interval * 2; width = 0; map<int, pair<int, int>> width_index; // (height_start, height, width) width_index[0] = pair<int, int>(height, 0); do { int min_w = INT_MAX, min_start = 0; for (auto w : width_index) { if (w.second.first > current_size.height + interval && w.second.second < min_w) { min_w = w.second.second; min_start = w.first; } } location_and_scale.push_back(make_pair(Rect(width_index[min_start].second, min_start, current_size.width, current_size.height), (double)current_size.width / (double)input_image.cols)); width_index[min_start + current_size.height + interval] = width_index[min_start]; width_index[min_start + current_size.height + interval].first -= current_size.height + interval; width_index[min_start].first = current_size.height; width_index[min_start].second += current_size.width + interval; if 
(width_index[min_start].second > width) width = width_index[min_start].second; //for (auto h : width_index) { // cout << h.first << " " << h.second.first << " " << h.second.second << endl; //} //cout << "====================" << endl; current_size.width *= scaling; current_size.height *= scaling; } while (current_size.height > min_side); width += interval; } Mat big_image = Mat::zeros(height, width, input_image.type()); big_image = background_color; Mat resized_image = input_image; //std::chrono::time_point<std::chrono::system_clock> t0 = std::chrono::system_clock::now(); //#pragma omp parallel for // not very fast. for (auto ls : location_and_scale) { resize(resized_image, resized_image, Size(ls.first.width, ls.first.height)); resized_image.copyTo(big_image(ls.first)); } //cout << "resize time:" << (float)std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now() - t0).count() / 1000 << "ms" << endl; return big_image; } cv::Mat findNonReflectiveTransform(std::vector<cv::Point2d> source_points, std::vector<cv::Point2d> target_points, Mat& Tinv = Mat()) { assert(source_points.size() == target_points.size()); assert(source_points.size() >= 2); Mat U = Mat::zeros(target_points.size() * 2, 1, CV_64F); Mat X = Mat::zeros(source_points.size() * 2, 4, CV_64F); for (int i = 0; i < target_points.size(); i++) { U.at<double>(i * 2, 0) = source_points[i].x; U.at<double>(i * 2 + 1, 0) = source_points[i].y; X.at<double>(i * 2, 0) = target_points[i].x; X.at<double>(i * 2, 1) = target_points[i].y; X.at<double>(i * 2, 2) = 1; X.at<double>(i * 2, 3) = 0; X.at<double>(i * 2 + 1, 0) = target_points[i].y; X.at<double>(i * 2 + 1, 1) = -target_points[i].x; X.at<double>(i * 2 + 1, 2) = 0; X.at<double>(i * 2 + 1, 3) = 1; } Mat r = X.inv(DECOMP_SVD)*U; Tinv = (Mat_<double>(3, 3) << r.at<double>(0), -r.at<double>(1), 0, r.at<double>(1), r.at<double>(0), 0, r.at<double>(2), r.at<double>(3), 1); Mat T = Tinv.inv(DECOMP_SVD); Tinv = Tinv(Rect(0, 0, 2, 3)).t(); return T(Rect(0,0,2,3)).t(); } cv::Mat findSimilarityTransform(std::vector<cv::Point2d> source_points, std::vector<cv::Point2d> target_points, Mat& Tinv = Mat()) { Mat Tinv1, Tinv2; Mat trans1 = findNonReflectiveTransform(source_points, target_points, Tinv1); std::vector<Point2d> source_point_reflect; for (auto sp : source_points) { source_point_reflect.push_back(Point2d(-sp.x, sp.y)); } Mat trans2 = findNonReflectiveTransform(source_point_reflect, target_points, Tinv2); trans2.colRange(0,1) *= -1; std::vector<Point2d> trans_points1, trans_points2; transform(source_points, trans_points1, trans1); transform(source_points, trans_points2, trans2); double norm1 = norm(Mat(trans_points1), Mat(target_points), NORM_L2); double norm2 = norm(Mat(trans_points2), Mat(target_points), NORM_L2); Tinv = norm1 < norm2 ? Tinv1 : Tinv2; return norm1 < norm2 ? trans1 : trans2; } std::vector<cv::Point2d> getVertexFromBox(Rect2d box) { return{ Point2d(box.x, box.y), Point2d(box.x + box.width, box.y), Point2d(box.x + box.width, box.y + box.height), Point2d(box.x, box.y + box.height) }; } }
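What follows is a small usage sketch for the utilities above, not part of the original header. It assumes the header is saved under the hypothetical name face_util.h and that OpenCV is linked; it builds three candidate detections and keeps the non-suppressed ones with nms_max.

// bbox_demo.cpp -- illustrative only; "face_util.h" is a hypothetical name
// for the header above. Build against OpenCV, e.g. with pkg-config.
#include "face_util.h"
#include <cstdio>

int main() {
  using namespace FaceInception;
  std::vector<std::pair<cv::Rect2d, float>> candidates = {
    { cv::Rect2d(10, 10, 100, 100), 0.90f },
    { cv::Rect2d(12, 14, 100, 100), 0.80f }, // IoU with the first is about 0.89
    { cv::Rect2d(200, 200, 80, 80), 0.70f }, // disjoint detection
  };
  printf("IoU(0,1) = %f\n", IoU(candidates[0].first, candidates[1].first));
  // Suppress boxes whose IoU with a higher-scored box exceeds 0.5;
  // indices 0 and 2 should survive.
  std::vector<int> kept = nms_max(candidates, 0.5, 0.0);
  for (int idx : kept)
    printf("kept box %d with score %f\n", idx, candidates[idx].second);
  return 0;
}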
blackscholes.c
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif //ENABLE_TBB // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #endif //Precision to use for calculations #define fptype float #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. "P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; OptionData *data; fptype *prices; int numOptions; int * otype; fptype * sptprice; fptype * strike; fptype * rate; fptype * volatility; fptype * otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 fptype CNDF ( fptype InputX ) { int sign; fptype OutputX; fptype xInput; fptype xNPrimeofX; fptype expValues; fptype xK2; fptype xK2_2, xK2_3; fptype xK2_4, xK2_5; fptype xLocal, xLocal_1; fptype xLocal_2, xLocal_3; // Check for negative value of InputX if (InputX < 0.0) { InputX = -InputX; sign = 1; } else sign = 0; xInput = InputX; // Compute NPrimeX term common to both four & six decimal accuracy calcs expValues = exp(-0.5f * InputX * InputX); xNPrimeofX = expValues; xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI; xK2 = 0.2316419 * xInput; xK2 = 1.0 + xK2; xK2 = 1.0 / xK2; xK2_2 = xK2 * xK2; xK2_3 = xK2_2 * xK2; xK2_4 = xK2_3 * xK2; xK2_5 = xK2_4 * xK2; xLocal_1 = xK2 * 0.319381530; xLocal_2 = xK2_2 * (-0.356563782); xLocal_3 = xK2_3 * 1.781477937; xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_4 * (-1.821255978); xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_5 * 1.330274429; xLocal_2 = xLocal_2 + xLocal_3; xLocal_1 = xLocal_2 + xLocal_1; xLocal = xLocal_1 * xNPrimeofX; xLocal = 1.0 - xLocal; OutputX = xLocal; if (sign) { OutputX = 1.0 - OutputX; } return OutputX; } ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// 
fptype BlkSchlsEqEuroNoDiv( fptype sptprice, fptype strike, fptype rate, fptype volatility, fptype time, int otype, float timet ) { fptype OptionPrice; // local private working variables for the calculation fptype xStockPrice; fptype xStrikePrice; fptype xRiskFreeRate; fptype xVolatility; fptype xTime; fptype xSqrtTime; fptype logValues; fptype xLogTerm; fptype xD1; fptype xD2; fptype xPowerTerm; fptype xDen; fptype d1; fptype d2; fptype FutureValueX; fptype NofXd1; fptype NofXd2; fptype NegNofXd1; fptype NegNofXd2; xStockPrice = sptprice; xStrikePrice = strike; xRiskFreeRate = rate; xVolatility = volatility; xTime = time; xSqrtTime = sqrt(xTime); logValues = log( sptprice / strike ); xLogTerm = logValues; xPowerTerm = xVolatility * xVolatility; xPowerTerm = xPowerTerm * 0.5; xD1 = xRiskFreeRate + xPowerTerm; xD1 = xD1 * xTime; xD1 = xD1 + xLogTerm; xDen = xVolatility * xSqrtTime; xD1 = xD1 / xDen; xD2 = xD1 - xDen; d1 = xD1; d2 = xD2; NofXd1 = CNDF( d1 ); NofXd2 = CNDF( d2 ); FutureValueX = strike * ( exp( -(rate)*(time) ) ); if (otype == 0) { OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2); } else { NegNofXd1 = (1.0 - NofXd1); NegNofXd2 = (1.0 - NofXd2); OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1); } return OptionPrice; } #ifdef ENABLE_TBB struct mainWork { mainWork() {} mainWork(mainWork &w, tbb::split) {} void operator()(const tbb::blocked_range<int> &range) const { fptype price; int begin = range.begin(); int end = range.end(); for (int i=begin; i!=end; i++) { /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-5 ){ fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } }; #endif // ENABLE_TBB ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef ENABLE_TBB int bs_thread(void *tid_ptr) { int j; tbb::affinity_partitioner a; mainWork doall; for (j=0; j<NUM_RUNS; j++) { tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a); } return 0; } #else // !ENABLE_TBB #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr){ #else int bs_thread(void *tid_ptr) { #endif int i, j; fptype price; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for private(i, price, priceDelta) for (i=0; i<numOptions; i++) { #else //ENABLE_OPENMP for (i=start; i<end; i++) { #endif //ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ){ printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } return 0; } #endif //ENABLE_TBB int main (int argc, char **argv) { FILE *file; int i; int loopnum; fptype * buffer; int * buffer2; int rv; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif argc = 4; if (argc != 4) { printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); exit(1); } nThreads = 1; // atoi(argv[1]); char *inputFile = "/BKI.TXT"; // argv[2]; char *outputFile = "/OUT.TXT"; // argv[3]; //Read input data from file file = fopen(inputFile, "r"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", inputFile); exit(1); } rv = fscanf(file, "%i", &numOptions); if(rv != 1) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } if(nThreads > numOptions) { printf("WARNING: Not enough work, reducing number of threads to match number of options.\n"); nThreads = numOptions; } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB) if(nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); exit(1); } #endif // alloc spaces for the option data data = (OptionData*)malloc(numOptions*sizeof(OptionData)); prices = (fptype*)malloc(numOptions*sizeof(fptype)); for ( loopnum = 0; loopnum < numOptions; ++ loopnum ) { rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval); if(rv != 9) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", inputFile); exit(1); } #ifdef ENABLE_THREADS MAIN_INITENV(,8000000,nThreads); #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD); otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i=0; i<numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 
1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS #ifdef WIN32 HANDLE *threads; int *nums; threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE)); nums = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); free(threads); free(nums); #else int *tids; tids = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { tids[i]=i; CREATE_WITH_ARG(bs_thread, &tids[i]); } WAIT_FOR_END(nThreads); free(tids); #endif //WIN32 #else //ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid=0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else //ENABLE_OPENMP #ifdef ENABLE_TBB tbb::task_scheduler_init init(nThreads); int tid=0; bs_thread(&tid); #else //ENABLE_TBB //serial version int tid=0; bs_thread(&tid); #endif //ENABLE_TBB #endif //ENABLE_OPENMP #endif //ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif //Write prices to output file file = fopen(outputFile, "w"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", outputFile); exit(1); } rv = fprintf(file, "%i\n", numOptions); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } for(i=0; i<numOptions; i++) { rv = fprintf(file, "%.18f\n", prices[i]); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", outputFile); exit(1); } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif return 0; }
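The closed form in BlkSchlsEqEuroNoDiv can be sanity-checked outside the benchmark harness. Below is a minimal standalone sketch (not part of PARSEC; the inputs are illustrative) that evaluates the same Black-Scholes formula in double precision, using erfc for the normal CDF instead of the polynomial CNDF, and checks put-call parity.

/* bs_check.c -- standalone sanity check, illustrative only.
 * Build (assuming a C99 compiler): cc bs_check.c -lm */
#include <stdio.h>
#include <math.h>

/* standard normal CDF via the complementary error function */
static double norm_cdf(double x) { return 0.5 * erfc(-x / sqrt(2.0)); }

int main(void) {
    double S = 100.0, K = 100.0, r = 0.05, v = 0.2, T = 1.0;
    double d1 = (log(S / K) + (r + 0.5 * v * v) * T) / (v * sqrt(T));
    double d2 = d1 - v * sqrt(T);
    double call = S * norm_cdf(d1) - K * exp(-r * T) * norm_cdf(d2);
    double put  = K * exp(-r * T) * norm_cdf(-d2) - S * norm_cdf(-d1);
    /* put-call parity: call - put should equal S - K*exp(-rT) */
    printf("call = %.6f, put = %.6f, parity gap = %.2e\n",
           call, put, (call - put) - (S - K * exp(-r * T)));
    return 0;
}

For these inputs the call price comes out near 10.45, which matches the textbook value for an at-the-money one-year call at 20% volatility.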
HelloWorld.c
#include <stdio.h> #include <omp.h> int main () { int nthreads, tid; #pragma omp parallel num_threads(20) private(tid) { tid = omp_get_thread_num(); printf("Hello World from thread = %d\n", tid); if(tid == 0){ nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } } return 0; }
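The num_threads(20) clause above pins the team size in source. A common variant, sketched below under the assumption that OMP_NUM_THREADS should decide instead, drops the clause and queries omp_get_max_threads() before entering the region (compile with cc -fopenmp).

/* hello_env.c -- illustrative variant; team size comes from OMP_NUM_THREADS */
#include <stdio.h>
#include <omp.h>

int main(void) {
    printf("max threads available = %d\n", omp_get_max_threads());
    #pragma omp parallel
    {
        /* every thread prints its id and the actual team size */
        printf("Hello World from thread = %d of %d\n",
               omp_get_thread_num(), omp_get_num_threads());
    }
    return 0;
}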
15_primes-par1.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

int main(int argc, char **argv) {
  // how many prime numbers are there between 1 and N?
  unsigned long n = 99999;
  unsigned long aux = 2;
  unsigned long primes = 0;

  // No schedule clause: by default, static scheduling with N blocks = n_threads
  #pragma omp parallel for firstprivate(aux) reduction(+:primes)
  for (unsigned long i = 2; i < n; i++) {
    while (aux < i) {
      if (i % aux == 0) break;
      aux++;
    }
    if (aux == i) primes++;
    aux = 2; // reset the trial divisor for the next iteration
  }
  printf("%lu primes between 1 and %lu\n", primes, n);
  return 0;
}
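Trial division gets more expensive as i grows, so the default static partition leaves the highest-numbered chunk with most of the work. The sketch below shows the same count with dynamic scheduling; the chunk size of 1000 is an arbitrary illustrative choice, not a tuned value.

/* primes_dynamic.c -- illustrative rework with load balancing
 * Build: cc -fopenmp primes_dynamic.c */
#include <stdio.h>

int main(void) {
    unsigned long n = 99999;
    unsigned long primes = 0;
    /* dynamic scheduling hands chunks to threads on demand, so the
       expensive high-i iterations spread across the whole team */
    #pragma omp parallel for schedule(dynamic, 1000) reduction(+:primes)
    for (unsigned long i = 2; i < n; i++) {
        unsigned long d = 2;
        while (d < i && i % d != 0) d++;
        if (d == i) primes++;
    }
    printf("%lu primes between 1 and %lu\n", primes, n);
    return 0;
}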
GB_binop__pow_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint8) // C=scalar+B GB (_bind1st__pow_uint8) // C=scalar+B' GB (_bind1st_tran__pow_uint8) // C=A+scalar GB (_bind2nd__pow_uint8) // C=A'+scalar GB (_bind2nd_tran__pow_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 0 // BinaryOp: cij = GB_pow_uint8 (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint8 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT8 || GxB_NO_POW_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint8 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint8 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
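For context, the bind2nd kernel above is the code path SuiteSparse:GraphBLAS can dispatch to when a user applies a binary operator with the scalar bound as the second operand. The sketch below is an assumed user-level call sequence, relying on GraphBLAS.h and the GxB_POW_UINT8 operator being available; it is illustrative and not taken from this generated file.

/* pow_apply_demo.c -- assumed usage sketch, not part of the generated code.
 * Computes C = A .^ 2 elementwise on a uint8 matrix, which (when this
 * kernel is not disabled) reaches GB (_bind2nd__pow_uint8). */
#include <GraphBLAS.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    GrB_init(GrB_NONBLOCKING);
    GrB_Matrix A, C;
    GrB_Matrix_new(&A, GrB_UINT8, 2, 2);
    GrB_Matrix_new(&C, GrB_UINT8, 2, 2);
    GrB_Matrix_setElement_UINT8(A, 3, 0, 0);   /* A(0,0) = 3 */
    GrB_Matrix_setElement_UINT8(A, 5, 1, 1);   /* A(1,1) = 5 */
    /* bind the scalar 2 as the second operand of pow */
    GrB_Matrix_apply_BinaryOp2nd_UINT8(C, NULL, NULL, GxB_POW_UINT8, A, 2, NULL);
    uint8_t x = 0;
    GrB_Matrix_extractElement_UINT8(&x, C, 1, 1);
    printf("C(1,1) = %u\n", x);                /* expect 25 */
    GrB_Matrix_free(&A);
    GrB_Matrix_free(&C);
    GrB_finalize();
    return 0;
}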
edge_vol_int.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include "edge_proxy_common.h" #include <libxsmm_intrinsics_x86.h> #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif /*#define EDGE_HP_1G*/ /*#define HANDLE_AMOK*/ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif LIBXSMM_INLINE void* edge_hp_malloc( size_t nbytes, size_t alignment ) { void* ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if ( nbytes > num_large_pages*1073741824L ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if ( nbytes > num_large_pages*2097152UL ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc( nbytes, alignment ); #endif return ret_ptr; } LIBXSMM_INLINE void edge_hp_free( void* ptr, size_t nbytes ) { LIBXSMM_UNUSED( nbytes ); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free( ptr ); #endif } #if defined(__AVX512F__) LIBXSMM_INLINE void matMulFusedAC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd( i_beta ); LIBXSMM_UNUSED(i_r); for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { const __m512d alpha = _mm512_set1_pd( i_b[l_k*i_ldB + l_n] ); vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_a[l_m*i_ldA*8 + l_k*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } LIBXSMM_INLINE void matMulFusedBC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd( i_beta ); LIBXSMM_UNUSED(i_r); for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { const __m512d alpha = _mm512_set1_pd( i_a[l_m*i_ldA + l_k] ); vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_b[l_k*i_ldB*8 + l_n*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } #endif LIBXSMM_INLINE void amok_detect( const double* i_runtimes, size_t* io_amoks, const size_t i_workers ) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { time_avg += i_runtimes[8*i]; } } time_avg = time_avg/((double)(i_workers-io_amoks[8*i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { if ( i_runtimes[8*i] > time_avg*1.07 ) { /* this is the amok condition */ io_amoks[8*i_workers]++; io_amoks[8*i] = 1; } } } } LIBXSMM_INLINE void amok_balance( const size_t* i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t* io_chunk, size_t* io_mystart, size_t* io_myend ) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8*i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8*i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for ( l_z = 0; l_z < i_mytid; l_z++) { if ( i_amoks[8*l_z] != 0 ) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset+1) * l_chunk < i_worksize) ? 
((l_tid_offset+1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char* argv[]) { char* mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double* mat_a_values; libxsmm_dmmfunction a_kernel; char* mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double* mat_b_values; libxsmm_dmmfunction b_kernel; char* mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double* mat_c_values; libxsmm_dmmfunction c_kernel; char* mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double* mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double* q; double* qt; double* qs; double* star; double* global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants*num_quants; double* l_total_thread; double* l_cur_thread_time; double time_max; double time_min; double time_avg; size_t* amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes*num_quants*num_cfr; #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_num_threads = omp_get_num_threads(); } } #else l_num_threads = 1; #endif l_total_thread = (double*)malloc(8*l_num_threads*sizeof(double)); l_cur_thread_time = (double*)malloc(8*l_num_threads*sizeof(double)); amoks = (size_t*)malloc(8*(l_num_threads+1)*sizeof(size_t)); for ( i = 0; i < 8*((int)l_num_threads+1); i++ ) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz ); edge_sparse_csr_reader_double( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz ); edge_sparse_csr_reader_double( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz ); edge_sparse_csr_reader_double( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz ); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_a_rowptr, mat_a_colidx, (const void*)mat_a_values ).dmm; b_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_b_rowptr, mat_b_colidx, (const void*)mat_b_values ).dmm; c_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_c_rowptr, mat_c_colidx, (const void*)mat_c_values ).dmm; st_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_star, (unsigned int)num_cfr, mat_st_rowptr, mat_st_colidx, (const void*)mat_st_values ).dmm; if ( a_kernel == 0 ) { printf("a kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("b kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("c kernel could not be built -> exit!"); exit(-1); } if ( st_kernel == 0 ) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void*)edge_hp_malloc( 5*1024*1024, 2097152 ); memcpy( onegcode, (void*) a_kernel, 1505 ); memcpy( onegcode+(1*1024*1024)+64, (void*) b_kernel, 2892 ); memcpy( onegcode+(2*1024*1024)+128, (void*) c_kernel, 3249 ); memcpy( onegcode+(3*1024*1024)+196, (void*)st_kernel, 11010 ); a_kernel = (libxsmm_dmmfunction)onegcode; b_kernel = (libxsmm_dmmfunction)(onegcode+(1*1024*1024)+64); c_kernel = (libxsmm_dmmfunction)(onegcode+(2*1024*1024)+128); st_kernel = (libxsmm_dmmfunction)(onegcode+(3*1024*1024)+196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); q = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); qt = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems*3*l_star_ent*sizeof(double))) / ( 1024.0*1024.0 ) ); star = (double*)edge_hp_malloc( num_elems*3*l_star_ent*sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3*num_modes*num_modes*sizeof(double))) / ( 1024.0*1024 ) ); global = (double*)edge_hp_malloc( 3*num_modes*num_modes*sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); qs = (double*)edge_hp_malloc( l_num_threads*num_modes*num_quants*num_cfr*sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz*3; j++) { star[(i*3*mat_st_nnz)+j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes*num_modes; j++) { global[(i*num_modes*num_modes)+j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i, j) #endif { #if defined(_OPENMP) int mytid = omp_get_thread_num(); #else int mytid = 0; #endif libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8*l_num_threads]) { cur_amoks = amoks[8*l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel( star+(j*3*mat_st_nnz) , qt+(j*elem_size), qs+(mytid*elem_size) ); a_kernel( qs+(mytid*elem_size), global , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+mat_st_nnz , qt+(j*elem_size), qs+(mytid*elem_size) ); b_kernel( qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); c_kernel( qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #else matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global, q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+mat_st_nnz, qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8*mytid] = libxsmm_timer_duration( mystart, myend ); l_total_thread[8*mytid] += libxsmm_timer_duration( mystart, myend ); #if defined(_OPENMP) #pragma omp barrier #endif #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect( l_cur_thread_time, amoks, l_num_threads ); } #if defined(_OPENMP) #pragma omp barrier #endif #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if( amoks[8*i] == 0 ) { if( l_total_thread[8*i] > time_max) time_max = l_total_thread[8*i]; if( l_total_thread[8*i] < time_min) time_min = l_total_thread[8*i]; time_avg += l_total_thread[8*i]; } } time_avg = time_avg/((double)(l_num_threads-amoks[8*l_num_threads])); flops_vol = (double)num_quants * (double)mat_a_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_b_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_c_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_modes * (double)mat_st_nnz * (double)num_cfr * 6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8*l_num_threads]); for ( i = 0; i < (int)l_num_threads; i++ ) { if ( amoks[8*i] != 0 ) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * 
(double)num_reps) / (l_total * 1024.0*1024.0*1024.0) ); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
diagmm_x_sky_n_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"

/* Row-parallel diagonal multiply for a skyline (SKY) matrix:
 * y <- beta*y + alpha*diag(mat)*x, where x and y are dense
 * row-major blocks with `columns` columns and leading dimensions
 * ldx and ldy, respectively. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = mat->rows;
    ALPHA_INT n = columns;
    ALPHA_INT num_threads = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < m; ++r)
    {
        /* scale the output row by beta */
        ALPHA_Number *Y = &y[index2(r, 0, ldy)];
        for (ALPHA_INT c = 0; c < n; c++)
            alpha_mul(Y[c], Y[c], beta);
        /* the diagonal entry of skyline row r is its last stored value */
        ALPHA_INT idx = mat->pointers[r + 1] - 1;
        ALPHA_Number val;
        alpha_mul(val, alpha, mat->values[idx]);
        /* accumulate alpha * diag(A)[r] * x[r][:] into the output row */
        const ALPHA_Number *X = &x[index2(r, 0, ldx)];
        for (ALPHA_INT c = 0; c < n; ++c)
            alpha_madde(Y[c], val, X[c]);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
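For reference, the kernel above computes y := beta*y + alpha*diag(A)*x one row at a time, taking the diagonal of skyline row r from the last stored value of that row. The following plain-double sketch restates the same semantics without the ALPHA_* macros; all names here are illustrative and not part of the alphasparse API.

#include <stddef.h>

/* Illustrative reference only -- not part of alphasparse. Computes
 * y <- beta*y + alpha*diag(A)*x for a skyline matrix whose row r
 * stores its diagonal entry at values[pointers[r + 1] - 1]. */
static void diagmm_sky_ref(size_t m, size_t n, double alpha,
                           const size_t *pointers, const double *values,
                           const double *x, size_t ldx,
                           double beta, double *y, size_t ldy)
{
    for (size_t r = 0; r < m; ++r) {
        const double d = alpha * values[pointers[r + 1] - 1];
        for (size_t c = 0; c < n; ++c)
            y[r * ldy + c] = beta * y[r * ldy + c] + d * x[r * ldx + c];
    }
}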
c_pi.c
/* ***********************************************************************
   This program is part of the
   OpenMP Source Code Repository

   http://www.pcg.ull.es/ompscr/
   e-mail: ompscr@etsii.ull.es

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   (LICENSE file) along with this program; if not, write to the
   Free Software Foundation, Inc., 59 Temple Place, Suite 330,
   Boston, MA 02111-1307 USA

  FILE:              c_pi.c
  VERSION:           1.0
  DATE:              May 2004
  COMMENTS TO:       sande@csi.ull.es
  DESCRIPTION:       Parallel implementation of a PI generator using OpenMP
  COMMENTS:          The area under the curve y=4/(1+x*x) between 0 and 1
                     provides a way to compute Pi. The value of this integral
                     can be approximated using a sum (see the standalone
                     sketch after this listing).
  REFERENCES:        http://en.wikipedia.org/wiki/Pi
                     http://nereida.deioc.ull.es/~llCoMP/examples/examples/pi/pi_description.html
  BASIC PRAGMAS:     parallel
  USAGE:             ./c_pi.par
  INPUT:             Default precision
  OUTPUT:            The value of PI
  FILE FORMATS:      -
  RESTRICTIONS:      -
  REVISION HISTORY:
**************************************************************************/

#include "OmpSCR.h"

#define DEFAULT_PREC 1000000   /* Default precision */
#define NUM_ARGS     1
#define NUM_TIMERS   1

int main(int argc, char *argv[])
{
  double PI25DT = 3.141592653589793238462643;
  double local, w, total_time, pi;
  long i, N;                   /* Precision */
  int NUMTHREADS;
  char *PARAM_NAMES[NUM_ARGS] = {"Precision"};
  char *DEFAULTS_VALUE[NUM_ARGS] = {"1000000"}; /* Default: DEFAULT_PREC */
  char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};

  NUMTHREADS = omp_get_max_threads();
  OSCR_init(NUMTHREADS, "Pi generator", "Param: precision", NUM_ARGS,
      PARAM_NAMES, DEFAULTS_VALUE, NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
      argc, argv);

  N = OSCR_getarg_int(NUM_ARGS);

  OSCR_timer_start(0);
  w = 1.0 / N;
  pi = 0.0;

/* #pragma omp parallel for default(shared) private(i, local) reduction(+:pi) schedule(static, 1) */
#pragma omp parallel for default(shared) private(i, local) reduction(+:pi)
  for (i = 0; i < N; i++) {
    local = (i + 0.5) * w;
    pi += 4.0 / (1.0 + local * local);
  }
  pi *= w;

  OSCR_timer_stop(0);
  total_time = OSCR_timer_read(0);

  OSCR_report();
  printf("\n \t# THREADS INTERVAL \tTIME (secs.) \tPI \t\t\tERROR\n");
  printf("\t %d \t%10ld \t%14.6lf \t%1.20f\t%g\n",
      NUMTHREADS, N, total_time, pi, PI25DT-pi);

  return 0;
}

/*
 * vim:ts=2:sw=2:
 */
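The midpoint-rule sum described in the header comment stands on its own; below is a minimal serial sketch of the same approximation without the OmpSCR scaffolding. The default precision N = 1000000 matches DEFAULT_PREC above.

#include <stdio.h>

/* Minimal serial sketch of the midpoint rule used by c_pi.c:
 * pi ~= (1/N) * sum_{i=0}^{N-1} 4 / (1 + ((i + 0.5)/N)^2) */
int main(void)
{
  const long N = 1000000;           /* same as DEFAULT_PREC in c_pi.c */
  const double w = 1.0 / N;         /* width of each subinterval */
  double pi = 0.0;
  long i;
  for (i = 0; i < N; i++) {
    const double x = (i + 0.5) * w; /* midpoint of subinterval i */
    pi += 4.0 / (1.0 + x * x);
  }
  pi *= w;
  printf("pi ~= %.15f\n", pi);
  return 0;
}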